"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_A = """"""
_A = """"""
_A = """"""
_A = 1 # (0 is vertical, 1 is horizontal)
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Optional[Any] = get_dataset(__UpperCAmelCase , __UpperCAmelCase )
print("""Processing...""" )
lowerCAmelCase__ : List[str] = update_image_and_anno(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for index, image in enumerate(__UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ : Optional[Any] = random_chars(32 )
lowerCAmelCase__ : Optional[int] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCAmelCase__ : Optional[Any] = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__UpperCAmelCase )} with {file_name}""" )
lowerCAmelCase__ : Optional[int] = []
for anno in new_annos[index]:
lowerCAmelCase__ : Tuple = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__UpperCAmelCase )
with open(f"""/{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> tuple[list, list]:
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Tuple = []
for label_file in glob.glob(os.path.join(__UpperCAmelCase , """*.txt""" ) ):
lowerCAmelCase__ : List[str] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__UpperCAmelCase ) as in_file:
lowerCAmelCase__ : Any = in_file.readlines()
lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase , f"""{label_name}.jpg""" )
lowerCAmelCase__ : Tuple = []
for obj_list in obj_lists:
lowerCAmelCase__ : List[Any] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__UpperCAmelCase )
labels.append(__UpperCAmelCase )
return img_paths, labels
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 ) -> tuple[list, list, list]:
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : Tuple = []
for idx in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ : str = []
lowerCAmelCase__ : int = img_list[idx]
path_list.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = anno_list[idx]
lowerCAmelCase__ : Tuple = cva.imread(__UpperCAmelCase )
if flip_type == 1:
lowerCAmelCase__ : str = cva.flip(__UpperCAmelCase , __UpperCAmelCase )
for bbox in img_annos:
lowerCAmelCase__ : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCAmelCase__ : Tuple = cva.flip(__UpperCAmelCase , __UpperCAmelCase )
for bbox in img_annos:
lowerCAmelCase__ : List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__UpperCAmelCase )
new_imgs_list.append(__UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def lowercase_ ( __UpperCAmelCase = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
lowerCAmelCase__ : Any = ascii_lowercase + digits
return "".join(random.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
"""
Check that the two halves of Transformers' custom __init__.py files (the
`_import_structure` dictionary and the `TYPE_CHECKING` branch) define the same
objects, and that every submodule is registered in the main init.
"""
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the
    `_import_structure` half and in the `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between the _import_structure objects and the
    TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not
    define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
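
# For reference, a sketch of the behaviour the assertions above imply (this is
# inferred from the test, not quoted from the accelerate docs): the success
# list parses to typed values, roughly
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}
# while the failure list mixes bare boolean flags ("--do_train", "--do_predict")
# with flag/value pairs, which the converter rejects with a ValueError.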
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""ROUGE metric, wrapping Google Research's `rouge_score` package."""

import absl  # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import six  # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring

import datasets


_CITATION = """\
@inproceedings{lin-2004-rouge,
    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
    author = "Lin, Chin-Yew",
    booktitle = "Text Summarization Branches Out",
    month = jul,
    year = "2004",
    address = "Barcelona, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W04-1013",
    pages = "74--81",
}
"""

_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.

Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.

This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""

_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
        `"rougeL"`: Longest common subsequence based scoring.
        `"rougeLSum"`: rougeLsum splits text using `"\n"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:

    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
"""Tokenization classes for the XLNet model (fast version)."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" XLNet tokenizer (backed by the HuggingFace tokenizers
    library), based on a SentencePiece/Unigram model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
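
# Worked example for create_token_type_ids_from_sequences above (derived
# directly from the code, not from the library docs): XLNet puts its special
# tokens at the end, so for a 2-token first sequence A and a 3-token second
# sequence B the type ids cover "A <sep>" with 0s, "B <sep>" with 1s, and the
# trailing <cls> with 2:
#
#   [0, 0, 0] + [1, 1, 1, 1] + [2]  ->  [0, 0, 0, 1, 1, 1, 1, 2]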
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use the "old" tokenizer here because of a bug when downloading the new one
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
def remove_duplicates(key: str) -> str:
    """
    Removes duplicate alphabetic characters from a keyword (a letter is
    ignored after its first appearance); spaces are kept.
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """
    Returns a substitution map for the given keyword: the keyword letters come
    first, then the rest of the alphabet, skipping letters already used.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Enciphers an (upper-cased) message given a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Deciphers a message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handles I/O."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
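
# A short, hand-verified usage sketch for the cipher above. With the keyword
# "Goodbye!!", remove_duplicates yields "GODBYE", so A->G, B->O, C->D, D->B,
# E->Y, F->E, and the remaining letters continue the alphabet without reusing
# keyword letters:
#
#   cipher_map = create_cipher_map("Goodbye!!")
#   encipher("Hello World!!", cipher_map)   # -> 'CYJJM VMQJB!!'
#   decipher("CYJJM VMQJB!!", cipher_map)   # -> 'HELLO WORLD!!'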
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target` using only characters in `genes`."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
__lowerCAmelCase: str = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__lowerCAmelCase: Optional[Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
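
# A worked example (assumed inputs, arithmetic checked by hand): borrowing
# principal = 25000 at 12% per annum over 3 years gives rate_per_month = 0.01
# and number_of_payments = 36, so
#   EMI = 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36 per month.
#
#   equated_monthly_installments(25000, 0.12, 3)  # -> approximately 830.36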
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
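
# A minimal sketch of what the lazy-init pattern above buys (the module and
# class names are the real ones from this file; the usage itself is
# illustrative): importing the package only records `_import_structure`, and
# the heavy torch-backed submodule is loaded on first attribute access.
#
#   from transformers.models import biogpt   # cheap: no modeling code imported yet
#   config = biogpt.BioGptConfig()           # first attribute access triggers the real import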
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
"""
Conway's Game of Life
https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
"""
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """
    Run the rules of the game through all points and return the canvas of the
    population after one step.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of the game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
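
# Two hand-checked cases for __judge_point above: a live cell with exactly two
# live neighbours survives, and a dead cell with exactly three live neighbours
# becomes alive. Note the 3x3 window includes the focus point itself, which
# the function subtracts back out before applying the rules.
#
#   __judge_point(True, [[True, False, False], [True, True, False], [False, False, False]])    # -> True
#   __judge_point(False, [[True, True, True], [False, False, False], [False, False, False]])   # -> True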
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Dict , *__a : Optional[int] , **__a : Dict ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """Convert a nested list of token ids into a tf.int32 constant."""
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 51 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 661 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings: 1.0 for identical strings,
    0.0 when nothing matches.

    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition: half the number of positions where the matched sequences disagree
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
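
# Worked example (computed by hand from the definition above): for "hello" vs
# "world" only the single "l" matches, so jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.467,
# and with no common prefix the Winkler boost leaves the score unchanged.
# That is the value the demo below prints.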
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 661 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs by adding special tokens: ``[CLS] X [SEP]`` for a single
        sequence and ``[CLS] A [SEP] B [SEP]`` for a pair of sequences.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 213 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
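
    # Minimal usage sketch (argument values assumed, not taken from this file):
    #   processor(text="hello world", audio_target=waveform, sampling_rate=16000)
    # returns tokenized `input_ids` with the spectrogram targets attached as `labels`.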
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the feature extractor to the target feature size
                # (the number of mel bins) so that padding works on spectrogram labels.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 213 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and
        # ceil(0.4 * 226) = 91 tokens are kept after masking
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 53 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 643 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
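
# Quick sanity check against the 7B entry of INTERMEDIATE_SIZE_MAP above
# (the hidden dim 4096 for 7B is assumed, not read from this file):
# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 gives 11008.
assert compute_intermediate_size(4096) == 11008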
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
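    # The reshape in `permute` converts the checkpoint's interleaved rotary
    # layout (alternating pairs of dims within each head) into the contiguous
    # half-split layout that the Hugging Face attention code expects.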
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""")
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCamelCase_ = torch.load(os.path.join(_lowerCAmelCase , "consolidated.00.pth") , map_location="cpu")
else:
# Sharded
UpperCamelCase_ = [
torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""") , map_location="cpu")
for i in range(_lowerCAmelCase)
]
UpperCamelCase_ = 0
UpperCamelCase_ = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, so saving attention_norm and ffn_norm would save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
UpperCamelCase_ = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCamelCase_ = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
UpperCamelCase_ = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(_lowerCAmelCase)] , dim=1),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(_lowerCAmelCase)] , dim=0),
}
for k, v in state_dict.items():
UpperCamelCase_ = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
| 720 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "facebook/opt-125m"
UpperCamelCase_ = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCamelCase_ = []
UpperCamelCase_ = GPTaTokenizer.from_pretrained(snake_case__ )
UpperCamelCase_ = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
UpperCamelCase_ = tokenizer(snake_case__ , return_tensors="tf" ).input_ids
UpperCamelCase_ = model.generate(snake_case__ , max_length=10 )
UpperCamelCase_ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
    def test_batch_generation( self ):
        '''simple docstring'''
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences , return_tensors="tf" , padding=True )
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["attention_mask"] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        '''simple docstring'''
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 504 | 0 |
import inspect
import unittest
class DependencyTester( unittest.TestCase ):
    def test_diffusers_import( self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration( self ):
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
| 64 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bridgetower_vision_model'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # for the vision sub-model, pull the nested vision config out of a full BridgeTower config
        if config_dict.get('''model_type''' ) == "bridgetower":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bridgetower_text_model'
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('''model_type''' ) == "bridgetower":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bridgetower'
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        '''simple docstring'''
        _ = kwargs.pop('''text_config_dict''' , None )
        _ = kwargs.pop('''vision_config_dict''' , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls , text_config : BridgeTowerTextConfig , vision_config : BridgeTowerVisionConfig , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state ):
    '''simple docstring'''
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state ):
    '''simple docstring'''
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state ):
    '''simple docstring'''
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def test_broadcast(state ):
    '''simple docstring'''
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state ):
    '''simple docstring'''
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "sum" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
def test_reduce_mean(state ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "mean" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
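# Illustrative note (added; not from the original test file): with two processes,
# create_tensor() returns tensor([1., 2.]) on rank 0 and tensor([3., 4.]) on rank 1,
# so an elementwise "sum" reduce yields [4., 6.] and "mean" yields [2., 3.],
# which is exactly what the truth tensors above encode.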
def _mp_fn(index ):
    '''simple docstring'''
    main()
def main():
    '''simple docstring'''
    state = PartialState()
    state.print(F"""State: {state}""" )
    state.print("testing gather" )
    test_gather(state )
    state.print("testing gather_object" )
    test_gather_object(state )
    state.print("testing broadcast" )
    test_broadcast(state )
    state.print("testing pad_across_processes" )
    test_pad_across_processes(state )
    state.print("testing reduce_sum" )
    test_reduce_sum(state )
    state.print("testing reduce_mean" )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
| 205 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    '''simple docstring'''
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
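# A sequential sketch of the same odd-even transposition idea, added for clarity.
# It assumes nothing beyond plain Python lists and is independent of the
# multiprocessing version above: after len(arr) alternating phases the list is sorted.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr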
def odd_even_transposition(arr ):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
main()
| 205 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__( self , img , dst_width , dst_height ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x ):
        return int(self.ratio_x * x )
    def get_y( self , y ):
        return int(self.ratio_y * y )
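# A vectorized numpy sketch of the same nearest-neighbour mapping (an added
# illustration; the helper below is ours and not part of the original class):
def _nearest_neighbour_numpy(img, dst_w, dst_h):
    # flooring the scaled coordinates picks the nearest source pixel
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]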
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 213 | """simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton :
    def __init__( self , keywords ):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state , char ):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword ):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        """value""": character,
                        """next_states""": [],
                        """fail_state""": 0,
                        """output""": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["""fail_state"""] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["""fail_state"""]
                while (
                    self.find_next_state(state , self.adlist[child]["""value"""] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["""fail_state"""]
                self.adlist[child]["""fail_state"""] = self.find_next_state(
                    state , self.adlist[child]["""value"""] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["""fail_state"""] = 0
                self.adlist[child]["""output"""] = (
                    self.adlist[child]["""output"""]
                    + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
                )
    def search_in( self , string ):
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["""fail_state"""]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
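# Minimal usage sketch (added; the original __main__ below only runs doctest):
#   automaton = Automaton(["what", "hat", "ver", "er"])
#   automaton.search_in("whatever, err ... , wherever")
#   -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}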
if __name__ == "__main__":
import doctest
doctest.testmod()
| 213 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
lowerCamelCase__ = {"allegro/herbert-base-cased": 514}
lowerCamelCase__ = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
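    # Layout note (added): a single sequence becomes  <s> A </s>
    # and a sequence pair becomes                     <s> A </s> B </s>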
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 702 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig( PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        '''simple docstring'''
        return self.d_model
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 202 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
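# Worked example (added): the space byte (32) falls outside the printable ranges
# above, so it is remapped to chr(2**8 + 32) == "Ġ" -- the familiar marker for a
# leading space in GPT-2-style vocabularies.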
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
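# e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")} (added example)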
class LEDTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
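    # Added trace for clarity: assuming merges [("l", "o"), ("lo", "w")] exist in
    # self.bpe_ranks, bpe("low") walks ("l", "o", "w") -> ("lo", "w") -> ("low",)
    # and caches/returns "low".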
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
| 135 |
'''simple docstring'''
import math
def is_prime(number : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio : float = 0.1 ):
    '''simple docstring'''
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
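# Added note: the corners of the spiral layer with side length j + 2 are
# (j + 2)**2 - k * (j + 1) for k in 0..3; the range() above visits the three
# non-square corners, e.g. for j = 3 it checks 13, 17 and 21 (25 is the square).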
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
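    # Note (added): encode_prompt returns four tensors (prompt_embeds,
    # negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds);
    # passing them explicitly must reproduce the plain-prompt forward pass above.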
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def a ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Dict = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
_lowerCAmelCase : str = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
_lowerCAmelCase : Optional[int] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_stable_diffusion_default( self ):
        '''simple docstring'''
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 710 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number ):
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
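# Worked example (added): 44 -> 32 -> 13 -> 10 -> 1, while
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops back,
# so every chain ends at either 1 or 89.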
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False
def chain(number ):
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number = 10_000_000 ):
    """simple docstring"""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 630 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , '''rb''' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:" )
        print(*incompatible_ops , sep='''\n''' )
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
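# Example invocation (added; the script path is illustrative):
#   python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict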
| 253 |
from __future__ import annotations
def rec_insertion_sort(collection : list , n : int ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection : list , index : int ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
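# Added trace: on [3, 1, 2] the swaps proceed [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3];
# each insert_next call bubbles one out-of-place element into position.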
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter integers separated by spaces: ')
UpperCAmelCase_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 253 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
    max_seq_length: int = field(
        default=1024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'} )
    def __post_init__( self ):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
        else:
            train_extension = self.train_file.split(""".""" )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(""".""" )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""", distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(""".""" )[-1]
                test_extension = data_args.test_file.split(""".""" )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["""test"""] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
        if data_args.train_file.endswith(""".csv""" ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("""csv""" , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("""json""" , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
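# --- Illustrative sketch (editor addition, not part of the original script) ---
# `_convert_table_text_to_pandas` above expects "#"-delimited cells, one table
# row per line, with the header on the first line. The sample string below is
# an assumption for demonstration only:
#
#   table_text = "city#population\nparis#2100000\nlyon#510000"
#   rows = [r.split("#") for r in table_text.strip("\n").split("\n")]
#   df = pd.DataFrame.from_records(rows[1:], columns=rows[0])
#   # df now has columns ["city", "population"], ready to be paired with a
#   # statement string for the TAPEX tokenizer.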
| 13 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
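# --- Usage sketch (editor addition; `DetaConfig` is the upstream name this
# class corresponds to, inferred from model_type="deta") ---
# from transformers import DetaConfig
# config = DetaConfig()
# config.hidden_size == config.d_model                      # via attribute_map
# config.num_attention_heads == config.encoder_attention_heads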
| 13 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
snake_case__ : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
snake_case__ : int = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = CamembertTokenizer
_snake_case : Optional[int] = CamembertTokenizerFast
_snake_case : Optional[int] = True
_snake_case : Any = True
def _snake_case ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = "<pad>"
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCamelCase ) , 1_004 )
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
__lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowercase = "I was born in 92000, and this is falsé."
__lowercase = tokenizer.encode(lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowercase = tokenizer.convert_ids_to_tokens(lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = "I was born in 92000, and this is falsé."
__lowercase = tokenizer.tokenize(lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowercase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowerCamelCase , )
| 402 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even-sized subtrees in ``cuts``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
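# For the edge list above the script prints 2: removing edges (1, 3) and
# (1, 6) splits the tree into three even components of sizes 2, 4 and 4.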
| 402 | 1 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Pigeonhole sort: efficient when the value range is close to the list length."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
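# Quick non-interactive check (editor addition; runs on import):
assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]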
 | 703 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = "T5Config"
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
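# --- Usage sketch (editor addition; these are the usual upstream class and
# checkpoint names, assumed here since the snippet's names are mangled) ---
# from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
# tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
# model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")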
| 43 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous (optionally empty) subarrays."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
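# The flag only changes the result for all-negative input (editor addition):
assert max_subarray_sum([-2, -3, -1]) == -1
assert max_subarray_sum([-2, -3, -1], allow_empty_subarrays=True) == 0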
| 275 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'roberta-prelayernorm'
def __init__( self : Tuple , lowerCamelCase : Tuple=5_02_65 , lowerCamelCase : Optional[int]=7_68 , lowerCamelCase : Optional[int]=12 , lowerCamelCase : Optional[int]=12 , lowerCamelCase : int=30_72 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Optional[int]=5_12 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : int=0.02 , lowerCamelCase : Any=1E-12 , lowerCamelCase : int=1 , lowerCamelCase : List[Any]=0 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Optional[Any]="absolute" , lowerCamelCase : List[Any]=True , lowerCamelCase : Tuple=None , **lowerCamelCase : int , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Dict = num_hidden_layers
lowerCAmelCase_ : List[Any] = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : List[Any] = type_vocab_size
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : List[str] = position_embedding_type
lowerCAmelCase_ : Tuple = use_cache
lowerCAmelCase_ : Union[str, Any] = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@property
def __lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 275 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'vit'
def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0_2 , lowercase=1e-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=True , lowercase=16 , **lowercase , ) -> int:
super().__init__(**lowercase )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = encoder_stride
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE_( self ) -> float:
return 1e-4
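# --- Export sketch (editor addition; `ViTConfig`/`ViTOnnxConfig` are the
# upstream names for the mangled classes above, and the call signature of
# `transformers.onnx.export` is assumed from its documented interface) ---
# from pathlib import Path
# from transformers.onnx import export
# onnx_config = ViTOnnxConfig(ViTConfig())
# export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("vit.onnx"))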
| 313 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the block fill count first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
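# Worked check of the recurrence (editor addition): with min_block_length=3
# the table runs 1, 1, 1, 2, 4, 7, 11, 17, ... so F(3, 7) = 17, matching the
# Project Euler 114/115 statement, and solution(3) returns 30, since
# F(3, 30) = 1,048,576 is the first count above one million.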
| 313 | 1 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal (logical XNOR), else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
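# XNOR is the complement of XOR: for a, b in {0, 1},
# xnor_gate(a, b) == 1 - (a ^ b).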
| 93 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , ) ->Tuple:
"""simple docstring"""
output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , enable_onnx_checker=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
else:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase__ :Tuple = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCAmelCase__ :List[Any] = 'cpu'
lowerCAmelCase__ :List[str] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = Path(_SCREAMING_SNAKE_CASE )
# TEXT ENCODER
lowerCAmelCase__ :str = pipeline.text_encoder.config.max_position_embeddings
lowerCAmelCase__ :Dict = pipeline.text_encoder.config.hidden_size
lowerCAmelCase__ :List[Any] = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=_SCREAMING_SNAKE_CASE , )
del pipeline.text_encoder
# UNET
lowerCAmelCase__ :int = pipeline.unet.config.in_channels
lowerCAmelCase__ :Optional[Any] = pipeline.unet.config.sample_size
lowerCAmelCase__ :Dict = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(2 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
False,
) , output_path=_SCREAMING_SNAKE_CASE , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :List[Any] = str(unet_path.absolute().as_posix() )
lowerCAmelCase__ :int = os.path.dirname(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = onnx.load(_SCREAMING_SNAKE_CASE )
# clean up existing tensor files
shutil.rmtree(_SCREAMING_SNAKE_CASE )
os.mkdir(_SCREAMING_SNAKE_CASE )
# collate external tensor files into one
onnx.save_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_as_external_data=_SCREAMING_SNAKE_CASE , all_tensors_to_one_file=_SCREAMING_SNAKE_CASE , location='weights.pb' , convert_attribute=_SCREAMING_SNAKE_CASE , )
del pipeline.unet
# VAE ENCODER
lowerCAmelCase__ :int = pipeline.vae
lowerCAmelCase__ :Optional[Any] = vae_encoder.config.in_channels
lowerCAmelCase__ :int = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowerCAmelCase__ :str = lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : vae_encoder.encode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0].sample()
onnx_export(
_SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_SCREAMING_SNAKE_CASE , )
# VAE DECODER
lowerCAmelCase__ :int = pipeline.vae
lowerCAmelCase__ :List[Any] = vae_decoder.config.latent_channels
lowerCAmelCase__ :Optional[int] = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCAmelCase__ :Any = vae_encoder.decode
onnx_export(
_SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_SCREAMING_SNAKE_CASE , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCAmelCase__ :Optional[int] = pipeline.safety_checker
lowerCAmelCase__ :Optional[int] = safety_checker.config.vision_config.num_channels
lowerCAmelCase__ :Any = safety_checker.config.vision_config.image_size
lowerCAmelCase__ :List[str] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=_SCREAMING_SNAKE_CASE , )
del pipeline.safety_checker
lowerCAmelCase__ :Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
lowerCAmelCase__ :Dict = pipeline.feature_extractor
else:
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Optional[int] = None
lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
print('ONNX pipeline saved to' , _SCREAMING_SNAKE_CASE )
del pipeline
del onnx_pipeline
lowerCAmelCase__ :Dict = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
__A = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
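# Example invocation (editor addition; script name and model id are
# placeholders/assumptions):
#   python convert_stable_diffusion_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14 --fp16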
| 93 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = BertTokenizer
lowerCamelCase_ = BertTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = filter_non_english
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = 'UNwant\u00E9d,running'
A_ : Optional[int] = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file )
A_ : int = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : List[str] = self.get_tokenizer()
A_ : Optional[Any] = self.get_rust_tokenizer()
A_ : Optional[Any] = 'UNwant\u00E9d,running'
A_ : str = tokenizer.tokenize(lowercase )
A_ : Dict = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Tuple = self.get_rust_tokenizer()
A_ : Optional[int] = tokenizer.encode(lowercase )
A_ : Any = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# With lower casing
A_ : Union[str, Any] = self.get_tokenizer(do_lower_case=lowercase )
A_ : Tuple = self.get_rust_tokenizer(do_lower_case=lowercase )
A_ : List[Any] = 'UNwant\u00E9d,running'
A_ : Optional[int] = tokenizer.tokenize(lowercase )
A_ : Union[str, Any] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : List[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : str = self.get_rust_tokenizer()
A_ : Tuple = tokenizer.encode(lowercase )
A_ : Union[str, Any] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = BasicTokenizer()
A_ : int = 'a\n\'ll !!to?\'d of, can\'t.'
A_ : Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A_ : Optional[Any] = {}
for i, token in enumerate(lowercase ):
A_ : Dict = i
A_ : Any = WordpieceTokenizer(vocab=lowercase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.get_tokenizer()
A_ : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : int = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
A_ : Union[str, Any] = tokenizer_r.encode_plus(
lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
A_ : List[Any] = tokenizer_r.do_lower_case if hasattr(lowercase , 'do_lower_case' ) else False
A_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ['的', '人', '有']
A_ : int = ''.join(lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : int = True
A_ : List[str] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Any = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
A_ : int = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
A_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowercase )
A_ : Any = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : str = False
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Union[str, Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
A_ : Tuple = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
A_ : Any = tokenizer_r.convert_ids_to_tokens(lowercase )
A_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase )
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
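# --- Sketch of the offset-mapping feature exercised above (editor addition;
# the checkpoint id is the usual public one, assumed available) ---
# from transformers import BertTokenizerFast
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
# enc = tok("A, naïve sentence.", return_offsets_mapping=True)
# # enc["offset_mapping"] pairs each token with its (start, end) character
# # span in the input, which is exactly what expected_results asserts.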
 | 70 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_1, node_2, cost in edges:
        adjacency[node_1].append([node_2, cost])
        adjacency[node_2].append([node_1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
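# The eight expected edges form the minimum spanning tree of this classic
# example graph; their total weight is 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.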
| 70 | 1 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize and build the API client
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
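# --- Alternative pagination sketch (editor addition) ---
# tweepy.Cursor can replace the manual max_id bookkeeping above; note that the
# standard v1.1 API returns at most roughly the 3200 most recent tweets:
# for status in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#     ...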
| 55 |
import os
def solution() -> int:
    """Maximum top-to-bottom path sum through triangle.txt (Project Euler 18/67 style)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Dynamic programming: fold each row's best predecessor into the row itself
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1, number_2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
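# Self-contained check of the same DP on a toy triangle (editor addition;
# the numbers are made up): best path 3 -> 7 -> 4 -> 9 sums to 23.
_toy = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for _i in range(1, len(_toy)):
    for _j in range(len(_toy[_i])):
        _up = _toy[_i - 1][_j] if _j != len(_toy[_i - 1]) else 0
        _left = _toy[_i - 1][_j - 1] if _j > 0 else 0
        _toy[_i][_j] += max(_up, _left)
assert max(_toy[-1]) == 23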
| 55 | 1 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximise profit within capacity ``max_weight``."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the whole item's profit (weight[index] / weight[index] == 1)
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, take only the
            # required number of remaining kgs and calculate profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
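# Worked examples (editor addition):
#   calc_profit([1, 2, 3], [3, 4, 5], 15)  -> 6    (everything fits)
#   calc_profit([10, 9, 8], [5, 6, 7], 10) -> 17.5 (item 0 whole, then 5/6 of item 1)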
| 703 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the records: collect the i-th field of every record into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Min-max normalise every column; weight 0 rewards low values, weight 1 rewards high values."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one score per record."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Append a weighted percentual-proximity score to every record in ``source_data``."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
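# Worked example (editor addition):
#   procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
#   appends ~2.0, ~1.0 and ~1.3333 respectively, since weight 0 rewards low
#   values and weight 1 rewards high ones.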
| 350 | 0 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """One elimination pass: normalise leading terms, cancel them, then recurse on the remainder."""
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of n+1 coefficients."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
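    # Expected output (editor addition; derived by symmetry: each equation
    # reads sum(x) + x_i = rhs_i with sum(x) = 5, so x_i = rhs_i - 5):
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    #   [0.5]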
| 13 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in descending key order while they still fit in the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
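# Usage sketch (editor addition; the menu data below is illustrative only):
#   foods = build_menu(["burger", "salad", "steak"], [80, 40, 100], [40, 10, 60])
#   taken, total_value = greedy(foods, 50, Things.get_value)
#   # steak (weight 60) is skipped; burger + salad fit, total_value == 120.0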
def test_greedy():
    """Placeholder for the doctest suite exercised below."""
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 44 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( ProcessorMixin ):
_a = ["image_processor", "tokenizer"]
_a = "Pix2StructImageProcessor"
_a = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , image_processor , tokenizer ) -> Dict:
tokenizer.return_token_type_ids = False
super().__init__(image_processor , tokenizer )
def __call__( self , _a=None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 2048 , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
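# Routing: text-only inputs go straight to the tokenizer; images are split into patches, and in the VQA setup the question is rendered onto the image as header text.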
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
self.current_processor = self.tokenizer
text_encoding = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
encoding_image_processor = self.image_processor(
_a , return_tensors=_a , max_patches=_a , **_a )
else:
# add pixel_values and bbox
encoding_image_processor = self.image_processor(
_a , return_tensors=_a , max_patches=_a , header_text=_a , **_a )
if text is not None and not self.image_processor.is_vqa:
text_encoding = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
if "attention_mask" in text_encoding:
text_encoding["""decoder_attention_mask"""] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
text_encoding["""decoder_input_ids"""] = text_encoding.pop("""input_ids""" )
else:
text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Union[str, Any]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.tokenizer.model_input_names
_A : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 54 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(vocab_keys ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
def a__ ( self ) -> Any:
tokenizer = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(_snake_case , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
raw_input_str = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
ids = self._large_tokenizer(raw_input_str ).input_ids
self.assertListEqual(
ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 71 |
from sklearn.metrics import mean_squared_error
import datasets
__A = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__A = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__A = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def _get_feature_types( self ) -> Tuple:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__="uniform_average" , lowerCamelCase__=True ) -> List[Any]:
'''simple docstring'''
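# Thin wrapper around scikit-learn; pass squared=False to get RMSE instead of MSE.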
mse = mean_squared_error(
lowerCamelCase__ , lowerCamelCase__ , sample_weight=lowerCamelCase__ , multioutput=lowerCamelCase__ , squared=lowerCamelCase__ )
return {"mse": mse}
| 469 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase_ : Any = TypeVar("T")
class UpperCamelCase__ ( Generic[T] ):
def __init__( self : Any , data : T ):
'''simple docstring'''
self.data = data
self.next = None
def __str__( self : Optional[int] ):
'''simple docstring'''
return F'''{self.data}'''
class UpperCamelCase__ ( Generic[T] ):
def __init__( self : str ):
'''simple docstring'''
self.top = None
def __iter__( self : Tuple ):
'''simple docstring'''
node = self.top
while node:
yield node.data
node = node.next
def __str__( self : Optional[Any] ):
'''simple docstring'''
return "->".join([str(lowerCamelCase ) for item in self] )
def __len__( self : Dict ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __a ( self : Tuple ):
'''simple docstring'''
return self.top is None
def __a ( self : Optional[Any] , lowerCamelCase : T ):
'''simple docstring'''
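# Link the new node in front of the current top: a stack grows at the head (LIFO).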
node = Node(lowerCamelCase )
if not self.is_empty():
node.next = self.top
self.top = node
def __a ( self : int ):
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase )
pop_node = self.top
self.top = self.top.next
return pop_node.data
def __a ( self : int ):
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __a ( self : Union[str, Any] ):
'''simple docstring'''
self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowerCamelCase (__lowerCamelCase : Tuple , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None ) -> Any:
a__ = True
while ask_again:
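# Re-prompt until the answer converts cleanly (or the default is accepted on empty input).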
a__ = input(__lowerCamelCase )
try:
if default is not None and len(__lowerCamelCase ) == 0:
return default
return convert_value(__lowerCamelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__lowerCamelCase )
def _lowerCamelCase (__lowerCamelCase : Dict , __lowerCamelCase : List[Any]=[] , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=0 ) -> List[Any]:
a__ = BulletMenu(__lowerCamelCase , __lowerCamelCase )
a__ = menu.run(default_choice=__lowerCamelCase )
return convert_value(__lowerCamelCase ) if convert_value is not None else result
def _lowerCamelCase (__lowerCamelCase : Tuple ) -> Optional[Any]:
a__ = int(__lowerCamelCase )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowerCamelCase (__lowerCamelCase : List[str] ) -> Optional[int]:
a__ = int(__lowerCamelCase )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowerCamelCase (__lowerCamelCase : List[Any] ) -> Optional[Any]:
a__ = int(__lowerCamelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowerCamelCase (__lowerCamelCase : str ) -> str:
a__ = int(__lowerCamelCase )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowerCamelCase (__lowerCamelCase : int ) -> Optional[Any]:
a__ = int(__lowerCamelCase )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowerCamelCase (__lowerCamelCase : Any ) -> List[Any]:
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase__ ( argparse.RawDescriptionHelpFormatter ):
def __a ( self : Dict , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ):
'''simple docstring'''
a__ = super()._format_usage(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
a__ = usage.replace("<command> [<args>] " , "" )
return usage
| 289 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 |
def fizz_buzz ( number ,iterations ):
'''simple docstring'''
if not isinstance(iterations ,int ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(number ,int ) or not number >= 1:
raise ValueError("""starting number must be an integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
out = """"""
while number <= iterations:
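# A number divisible by both 3 and 5 picks up "Fizz" and then "Buzz", yielding "FizzBuzz".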
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 569 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args : Dict ):
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ( ):
parser = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser )
EnvironmentCommand.register_subcommand(commands_parser )
TestCommand.register_subcommand(commands_parser )
RunBeamCommand.register_subcommand(commands_parser )
DummyDataCommand.register_subcommand(commands_parser )
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args , "func" ):
parser.print_help()
exit(1 )
kwargs = parse_unknown_args(unknown_args )
# Run
service = args.func(args , **kwargs )
service.run()
if __name__ == "__main__":
main()
| 708 |
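# Surface area and volume of a regular dodecahedron with edge length a:
# A = 3 * sqrt(25 + 10*sqrt(5)) * a^2 and V = (15 + 7*sqrt(5)) / 4 * a^3.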
def dodecahedron_surface_area ( edge : float ):
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("Length must be a positive number." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume ( edge : float ):
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("Length must be a positive number." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase__ ( lowerCamelCase : list[int] ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ):
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_A , _A : List[Any] = array[indexa], array[indexa]
def lowerCAmelCase__ ( lowerCamelCase : list[int] ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ):
if length > 1:
_A : Any = int(length / 2 )
for i in range(lowerCamelCase ,low + middle ):
comp_and_swap(lowerCamelCase ,lowerCamelCase ,i + middle ,lowerCamelCase )
bitonic_merge(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
bitonic_merge(lowerCamelCase ,low + middle ,lowerCamelCase ,lowerCamelCase )
def lowerCAmelCase__ ( lowerCamelCase : list[int] ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ):
if length > 1:
_A : List[Any] = int(length / 2 )
bitonic_sort(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,1 )
bitonic_sort(lowerCamelCase ,low + middle ,lowerCamelCase ,0 )
bitonic_merge(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
if __name__ == "__main__":
A : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
A : str = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 128 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __lowerCamelCase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int]):
super().__init__()
_A : Any = model
_A : Tuple = 2
_A : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels)
def A ( self : Dict):
pass
def lowerCAmelCase__ ( lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : str ):
# load longformer model from model identifier
_A : Optional[Any] = LongformerModel.from_pretrained(lowerCamelCase )
_A : Union[str, Any] = LightningModel(lowerCamelCase )
_A : Optional[Any] = torch.load(lowerCamelCase ,map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
_A : Dict = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A : Any = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 128 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a =logging.get_logger(__name__)
a ={
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a ={
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a ={
'facebook/blenderbot_small-90M': 512,
}
class __UpperCAmelCase ( _a ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
self.add_prefix_space = add_prefix_space
def _a ( self , token_ids_0 , token_ids_1=None ):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def _a ( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 714 | """simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __UpperCAmelCase ( unittest.TestCase ):
A__ : List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
A__ : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
# test with a raw waveform
lowerCamelCase__ =np.zeros((34000,) )
lowerCamelCase__ =np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ =examples
lowerCamelCase__ =audio_classifier(_lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCamelCase , [
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
] , )
lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=1 )
self.assertEqual(
_lowerCamelCase , [
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
] , )
self.run_torchaudio(_lowerCamelCase )
@require_torchaudio
def _a ( self , _lowerCamelCase ):
import datasets
# test with a local file
lowerCamelCase__ =datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
lowerCamelCase__ =dataset[0]["audio"]["array"]
lowerCamelCase__ =audio_classifier(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
] , )
@require_torch
def _a ( self ):
lowerCamelCase__ ="anton-l/wav2vec2-random-tiny-classifier"
lowerCamelCase__ =pipeline("audio-classification" , model=_lowerCamelCase )
lowerCamelCase__ =np.ones((8000,) )
lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )
lowerCamelCase__ =[
{"score": 0.0_8_4_2, "label": "no"},
{"score": 0.0_8_3_8, "label": "up"},
{"score": 0.0_8_3_7, "label": "go"},
{"score": 0.0_8_3_4, "label": "right"},
]
lowerCamelCase__ =[
{"score": 0.0_8_4_5, "label": "stop"},
{"score": 0.0_8_4_4, "label": "on"},
{"score": 0.0_8_4_1, "label": "right"},
{"score": 0.0_8_3_4, "label": "left"},
]
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowerCamelCase__ ={"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _a ( self ):
import datasets
lowerCamelCase__ ="superb/wav2vec2-base-superb-ks"
lowerCamelCase__ =pipeline("audio-classification" , model=_lowerCamelCase )
lowerCamelCase__ =datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
lowerCamelCase__ =np.array(dataset[3]["speech"] , dtype=np.floataa )
lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=3 ) , [
{"score": 0.9_8_1, "label": "go"},
{"score": 0.0_0_7, "label": "up"},
{"score": 0.0_0_6, "label": "_unknown_"},
{"score": 0.0_0_1, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def _a ( self ):
pass
| 132 | 0 |
def euclidean_distance_sqr ( point1 ,point2 ):
"""simple docstring"""
return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort ( array ,column=0 ):
"""simple docstring"""
return sorted(array ,key=lambda x : x[column] )
def dis_between_closest_pair ( points ,points_counts ,min_dis=float("inf" ) ):
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 ,points_counts ):
current_dis = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
min_dis = current_dis
return min_dis
def dis_between_closest_in_strip ( points ,points_counts ,min_dis=float("inf" ) ):
"""simple docstring"""
for i in range(min(6 ,points_counts - 1 ) ,points_counts ):
for j in range(max(0 ,i - 6 ) ,i ):
current_dis = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
min_dis = current_dis
return min_dis
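# Divide and conquer on the x-sorted points; the strip scan only has to compare a bounded
# number of neighbours per point, which keeps the whole search O(n log n).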
def closest_pair_of_points_sqr ( points_sorted_on_x ,points_sorted_on_y ,points_counts ):
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(points_sorted_on_x ,points_counts )
# recursion
mid = points_counts // 2
closest_in_left = closest_pair_of_points_sqr(
points_sorted_on_x ,points_sorted_on_y[:mid] ,mid )
closest_in_right = closest_pair_of_points_sqr(
points_sorted_on_y ,points_sorted_on_y[mid:] ,points_counts - mid )
closest_pair_dis = min(closest_in_left ,closest_in_right )
cross_strip = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(point )
closest_in_strip = dis_between_closest_in_strip(
cross_strip ,len(cross_strip ) ,closest_pair_dis )
return min(closest_pair_dis ,closest_in_strip )
def closest_pair_of_points ( points ,points_counts ):
"""simple docstring"""
points_sorted_on_x = column_based_sort(points ,column=0 )
points_sorted_on_y = column_based_sort(points ,column=1 )
return (
closest_pair_of_points_sqr(
points_sorted_on_x ,points_sorted_on_y ,points_counts )
) ** 0.5
if __name__ == "__main__":
__a :List[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 86 |
from __future__ import annotations
def lowercase__ ( nums : list[int] ):
'''simple docstring'''
if not nums:
return 0
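# House-robber style DP: carry the best sum that includes the current element and the best that excludes it.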
max_including = nums[0]
max_excluding = 0
for num in nums[1:]:
max_including , max_excluding = (
max_excluding + num,
max(max_including , max_excluding ),
)
return max(max_excluding , max_including )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 406 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def A_ ( lowercase_ ) ->int:
"""simple docstring"""
factors = prime_factors(lowercase_ )
if is_square_free(factors ):
return -1 if len(factors ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a_( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : Union[str, Any] =IFImgaImgSuperResolutionPipeline
__snake_case : int =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
__snake_case : Optional[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
__snake_case : int =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int]=0) -> Tuple:
"""simple docstring"""
if str(lowerCAmelCase__).startswith('mps'):
generator = torch.manual_seed(lowerCAmelCase__)
else:
generator = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
original_image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def __UpperCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def __UpperCamelCase ( self : List[str]) -> str:
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def __UpperCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def __UpperCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._test_save_load_local()
def __UpperCamelCase ( self : Tuple) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 259 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_roc_bert"""] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | """simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a ( lowerCAmelCase_ ):
@slow
@require_torch
def lowerCAmelCase_ ( self : str ):
bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase = bertabert.config.encoder.vocab_size
_UpperCAmelCase = tokenizer.sep_token_id
_UpperCAmelCase = tokenizer.cls_token_id
_UpperCAmelCase = 128
_UpperCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_UpperCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_UpperCAmelCase = train_dataset.select(range(32 ) )
_UpperCAmelCase = val_dataset.select(range(16 ) )
_UpperCAmelCase = 4
def _map_to_encoder_decoder_inputs(batch : Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__lowerCAmelCase , max_length=512 )
_UpperCAmelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__lowerCAmelCase , max_length=128 )
_UpperCAmelCase = inputs.input_ids
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = outputs.input_ids
_UpperCAmelCase = outputs.input_ids.copy()
_UpperCAmelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase = outputs.attention_mask
assert all(len(__lowerCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(__lowerCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(pred ):
_UpperCAmelCase = pred.label_ids
_UpperCAmelCase = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCAmelCase ) )] ) / len(__lowerCAmelCase )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_UpperCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = SeqaSeqTrainingArguments(
output_dir=__lowerCAmelCase , per_device_train_batch_size=__lowerCAmelCase , per_device_eval_batch_size=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , evaluation_strategy="""steps""" , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCAmelCase = SeqaSeqTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
# start training
trainer.train()
| 277 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
import math
from numpy import inf
from scipy.integrate import quad
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if num <= 0:
raise ValueError('math domain error' )
return quad(lowerCAmelCase_, 0, lowerCAmelCase_, args=(lowerCAmelCase_) )[0]
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
return math.pow(lowerCAmelCase_, z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 252 | 0 |
def print_max_activities ( start , finish ) -> None:
'''simple docstring'''
n = len(finish )
print('''The following activities are selected:''' )
# The first activity is always selected
i = 0
print(i , end=''',''' )
# Consider rest of the activities
for j in range(n ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(j , end=''',''' )
i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 35 |
from __future__ import annotations
__UpperCAmelCase : Any = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid: Matrix , row: int , column: int , n: int ) -> bool:
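# A digit ``n`` may be placed only if it is absent from the row, the column, and the 3x3 box.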
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location ( grid: Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku ( grid: Matrix ):
if location := find_empty_location(grid ):
row , column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(grid , row , column , digit ):
grid[row][column] = digit
if sudoku(grid ) is not None:
return grid
grid[row][column] = 0
return None
def print_solution ( grid: Matrix ) -> None:
for row in grid:
for cell in row:
print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__UpperCAmelCase : List[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 471 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def snake_case__ ( self : str ):
model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
def snake_case__ ( self : int ):
model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 390 | import numpy as np
def __lowerCAmelCase ( vector : np.array ):
'''simple docstring'''
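# Hyperbolic tangent written via the logistic identity: tanh(x) = 2 / (1 + e^(-2x)) - 1.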
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 390 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
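# A Gabor kernel is a sinusoid windowed by a Gaussian: theta sets the orientation,
# lambd the wavelength, sigma the envelope width, gamma the aspect ratio, psi the phase.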
def gabor_filter_kernel ( ksize : int , sigma : int , theta : int , lambd : int , gamma : int , psi : int ) -> np.ndarray:
# prepare kernel
# the kernel size has to be odd
if (ksize % 2) == 0:
ksize = ksize + 1
gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(ksize ):
for x in range(ksize ):
# distance from center
px = x - ksize // 2
py = y - ksize // 2
# degree to radiant
_theta = theta / 180 * np.pi
cos_theta = np.cos(_theta )
sin_theta = np.sin(_theta )
# get kernel x
_x = cos_theta * px + sin_theta * py
# get kernel y
_y = -sin_theta * px + cos_theta * py
# fill kernel
gabor[y, x] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_10)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 92 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowercase : Any =False
@skip_mps
class UpperCamelCase_ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a : Optional[int] = StableDiffusionAttendAndExcitePipeline
_a : Union[str, Any] = False
_a : Dict = TEXT_TO_IMAGE_PARAMS
_a : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __a ( cls : Tuple ):
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase )
@classmethod
def __a ( cls : Tuple ):
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase )
def __a ( self : Dict ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase , )
scheduler = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : List[Any]=0 ):
if str(lowerCamelCase ).startswith('mps' ):
lowerCamelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase_ : Dict = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type='numpy', ).images[0]
        expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 364 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
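
# Sketch added for illustration (an assumption, not part of the original __init__.py):
# the same deferred-import idea expressed with PEP 562 module-level __getattr__, so
# the torch-backed submodule is only imported when one of its names is first accessed.
import importlib

def _lazy_getattr(name):
    if name in _import_structure.get("modeling_focalnet", []):
        module = importlib.import_module(".modeling_focalnet", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")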
| 454 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # trace only leaf modules (no submodules) plus conv / batch-norm layers
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : Dict , _lowerCAmelCase : Tensor):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(_lowerCAmelCase)
[x.remove() for x in self.handles]
return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(src_traced) != len(dest_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")
        for dest_m, src_m in zip(dest_traced, src_traced):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"""resnet{'-'.join(name.split('resnet'))}"""
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 454 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: exclude the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include the element at `index`
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
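
# Sanity-check sketch (an addition, not in the original file): the recursion above
# emits every subsequence exactly once, so the total count must equal 2 ** len(sequence).
from itertools import combinations

def count_subsequences(sequence: list[Any]) -> int:
    return sum(1 for r in range(len(sequence) + 1) for _ in combinations(sequence, r))

assert count_subsequences([3, 1, 2, 4]) == 2 ** 4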
| 297 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
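
# Small illustration (added for clarity) of the getattr chain above: a dotted key
# is resolved one attribute at a time against the module tree.
class _DemoLeaf:
    weight = "w"

class _DemoRoot:
    leaf = _DemoLeaf()

_pointer = _DemoRoot()
for _attr in "leaf.weight".split("."):
    _pointer = getattr(_pointer, _attr)
assert _pointer == "w"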
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
hf_wavlm.save_pretrained(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 297 | 1 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
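
# Note added for clarity: bitonic sort assumes len(array) is a power of two. A
# hedged wrapper sketch that pads with +inf, sorts ascending, and strips the padding:
import math

def bitonic_sort_any_length(array: list[int]) -> list[int]:
    if not array:
        return []
    size = 2 ** math.ceil(math.log2(len(array)))
    padded = list(array) + [float("inf")] * (size - len(array))
    bitonic_sort(padded, 0, size, 1)
    return padded[: len(array)]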
| 718 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_0_9_6,
'''google/bigbird-roberta-large''': 4_0_9_6,
'''google/bigbird-base-trivia-itc''': 4_0_9_6,
}
SPIECE_UNDERLINE = '▁'


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__( self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
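
# Illustrative sketch (an addition, not part of the original file): the sentence-pair
# layout the helpers above produce, using plain integer ids; cls_id/sep_id are assumptions.
def _demo_pair_layout(ids_a, ids_b, cls_id=65, sep_id=66):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    assert len(input_ids) == len(token_type_ids)
    return input_ids, token_type_ids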
| 404 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='preactivation', hidden_act='relu', global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
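
# Simplified sketch (an assumption about the helper's behaviour, not its actual
# implementation): aligning requested backbone stages with their indices.
def _align_stages_demo(out_features, stage_names):
    out_features = out_features if out_features is not None else [stage_names[-1]]
    out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

# e.g. _align_stages_demo(["stage2"], ["stem", "stage1", "stage2"]) returns (["stage2"], [2])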
| 56 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__( self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
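
# Pure-numpy sketch (added for illustration; the values are assumptions): what the
# normalize step above reduces to — a rescale to [0, 1], then per-channel standardization.
def _normalize_demo(image, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    image = image.astype(np.float32) / 255.0
    return (image - np.array(mean)) / np.array(std)

assert _normalize_demo(np.zeros((18, 18, 3), dtype=np.uint8)).shape == (18, 18, 3)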
| 195 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum):.1f}')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'{round(-1 * my_sec_sum):.1f}')
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}')
def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
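
# Worked example (an addition for clarity): for the text "aab", P(a) = 2/3 and
# P(b) = 1/3, so H = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ≈ 0.918 bits per character.
def unigram_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

assert abs(unigram_entropy("aab") - 0.9183) < 1e-3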
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str = None,
    eval_file: str = None,
    test_file: str = None,
    tokenizer: PreTrainedTokenizer = None,
    label_column_id: int = None,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
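
# Minimal sketch of the from_generator pattern used above, with assumed toy data
# (an illustration, requiring only tensorflow):
def _toy_gen():
    yield {"input_ids": [1, 2, 3]}, 0
    yield {"input_ids": [4, 5]}, 1

_toy_ds = tf.data.Dataset.from_generator(
    _toy_gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)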
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.')
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        f'16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f'  {key} = {value}')
                writer.write(f'{key} = {value}\n')
        results.update(result)
    return results
if __name__ == "__main__":
main()
| 180 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
            f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
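
# Simplified sketch (an assumption, not the real control flow) of the lookup order
# from_pretrained follows: the feature extractor config first, then the model
# config, then the model-type mapping.
def _resolve_extractor_name(config_dict, model_config_attrs, model_type):
    if config_dict.get("feature_extractor_type"):
        return config_dict["feature_extractor_type"]
    if model_config_attrs.get("feature_extractor_type"):
        return model_config_attrs["feature_extractor_type"]
    return FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type)

# e.g. _resolve_extractor_name({}, {}, "vit") -> "ViTFeatureExtractor"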
| 529 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 529 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 717 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
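
# Hedged hardening sketch (an addition): the xpath above assumes the page layout
# is stable; checking the scraped field count fails fast when it changes.
def covid_stats_checked(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    values = html.fromstring(requests.get(url).content).xpath(
        '//div[@class = "maincounter-number"]/span/text()'
    )
    if len(values) != 3:
        raise ValueError(f"expected 3 counters, scraped {len(values)}")
    return covid_data(*values)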
| 622 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
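    # For reference, shift_tokens_right prepends the decoder start token and drops the
    # last position, so exactly one pad token is consumed -- hence the `n_pad_before - 1`
    # check above. A numpy sketch of the semantics (an illustration, not the library code):
    #
    #   def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    #       shifted = np.zeros_like(input_ids)
    #       shifted[:, 1:] = input_ids[:, :-1]
    #       shifted[:, 0] = decoder_start_token_id
    #       return np.where(shifted == -100, pad_token_id, shifted)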
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
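        # Worked example with the defaults above (my own arithmetic, for clarity):
        #   num_patches = (30 // 2) ** 2 = 225
        #   seq_length  = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91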
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1E-6)
    def test_numpy_arrays_inputs(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
@slow
    def test_save_load(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1E-5)
    def test_save_load_config(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
    def test_determinism(self):
        pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
    def test_model_outputs_equivalence(self):
        pass
@slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1E-4)
| 43 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 154 |
from __future__ import annotations
def is_9_pandigital(candidate: int) -> bool:
    digits = str(candidate)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"{solution() = }")
| 154 | 1 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # an integer rank means "track this many dual components", all seeded with 1
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
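# Why this works: evaluating func at the dual number (position + E1) propagates the
# Taylor expansion f(a + e) = f(a) + f'(a) e + f''(a) e^2 / 2! + ..., so the k-th dual
# coefficient of the result is f^(k)(a) / k!. Multiplying by factorial(order) below
# recovers the order-th derivative.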
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
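    # A quick sanity check of the machinery above (my own example): for
    # g(x) = x**3 the second derivative at x = 4 is 6 * 4 = 24.
    def g(x):
        return x**3

    assert differentiate(g, 4, 2) == 24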
| 195 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            # stride-2 conv halves the spatial resolution between channel stages
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)
        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
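# A minimal usage sketch of the model above (an illustration under assumed shapes,
# not part of the original module):
#
#   model = FlaxControlNetModel(sample_size=32)
#   params = model.init_weights(jax.random.PRNGKey(0))
#   sample = jnp.zeros((1, 4, 32, 32))
#   timesteps = jnp.ones((1,), dtype=jnp.int32)
#   encoder_hidden_states = jnp.zeros((1, 1, 1280))
#   controlnet_cond = jnp.zeros((1, 3, 256, 256))  # conditioning image at 8x the latent size
#   out = model.apply(
#       {"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond
#   )
#   print(out.mid_block_res_sample.shape)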
| 394 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_init_without_params(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_a = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase__ )
self.assertListEqual(encoding.boxes , lowerCamelCase__ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 718 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 276 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (no submodules) plus conv/batchnorm layers
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Trace one forward pass through both modules, then copy weights operation by operation
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}." )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory , model_name = None , push_to_hub = True ) -> Any:
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
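    # Example invocation (illustrative; the script file name is an assumption):
    #   python convert_resnet_to_hf.py --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted --push_to_hub False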
| 343 |
import math
import sys
def read_file_binary(file_path ) -> str:
    '''simple docstring'''
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data(data_bits ) -> str:
    '''simple docstring'''
    lexicon = {'0': '0', '1': '1'}
    curr_string , result = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path , to_write ) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix(data_bits ) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path , destination_path ) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
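# Standalone illustration (added, not in the original) of the bit-packing
# that write_file_binary performs: eight bits of the string become one byte.
_demo_bits = "0100100001101001"  # the ASCII bits of "Hi"
_demo_packed = bytes(int(_demo_bits[i : i + 8] , 2 ) for i in range(0 , len(_demo_bits ) , 8 ) )
assert _demo_packed == b"Hi"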
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 343 | 1 |
def hubble_parameter(hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
return hubble
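# Added sanity check (illustrative): for a flat universe the three relative
# densities sum to one, so at redshift zero the function returns H0 itself.
assert abs(hubble_parameter(68.3 , 1e-4 , 0.3 , 1 - 0.3 - 1e-4 , 0 ) - 68.3 ) < 1e-9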
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
    )
| 704 |
from sklearn.metrics import mean_squared_error
import datasets
_a: Any = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_a: List[Any] = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_a: List[str] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        '''simple docstring'''
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse} | 268 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> Any:
        """simple docstring"""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ) -> str:
        """simple docstring"""
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ) -> Any:
        """simple docstring"""
        return cls(**config )
    def get_config( self ) -> List[str]:
        """simple docstring"""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length = None ) -> Union[str, Any]:
        """simple docstring"""
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 317 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent( user_agent = None ) -> str:
    ua = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += F'''; jax/{_jax_version}'''
        ua += F'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += F'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id , organization = None , token = None ) -> Union[str, Any]:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F'''{username}/{model_id}'''
    else:
        return F'''{organization}/{model_id}'''
def create_model_card( args , model_name ) -> Optional[int]:
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file , commit_hash = None ) -> Union[str, Any]:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache( old_cache_dir = None , new_cache_dir = None ) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
__lowerCamelCase = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _add_variant( weights_name , variant = None ) -> str:
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
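# Quick check (added for illustration): the variant is spliced in just before
# the file extension.
assert _add_variant('''diffusion_pytorch_model.bin''' , '''fp16''' ) == '''diffusion_pytorch_model.fp16.bin'''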
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ) -> Optional[int]:
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
return model_file
except: # noqa: E722
                warnings.warn(
                    F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.''' , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
            F'''containing a file named {weights_name}''' )
| 317 | 1 |
def power( base , exponent ):
    '''simple docstring'''
    return base * power(base , (exponent - 1) ) if exponent else 1
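# Added for comparison (not in the original): an O(log n) alternative using
# exponentiation by squaring instead of the linear recursion above.
def fast_power( base , exponent ):
    result = 1
    while exponent:
        if exponent & 1:
            result *= base
        base *= base
        exponent >>= 1
    return result
assert fast_power(2 , 10 ) == power(2 , 10 ) == 1024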
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 702 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size_divisor : int = 3_2 , resample=PILImageResampling.BILINEAR , do_rescale : bool = True , **kwargs , ) -> None:
        '''simple docstring'''
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self , image : np.ndarray , size_divisor : int , resample , data_format : Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_5_5 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 265 | 0 |
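# Quick numeric illustration (standalone, added for clarity) of the
# size_divisor rounding used by `resize` above: floor to multiples of 32.
_size_divisor = 32
assert (37 // _size_divisor * _size_divisor , 65 // _size_divisor * _size_divisor ) == (32, 64)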
import math
def perfect_square( num: int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n: int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
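# Spot checks added for illustration: the float-based and the binary-search
# predicates agree on small inputs.
for _n in (0, 1, 9, 10, 16):
    assert perfect_square(_n ) == perfect_square_binary_search(_n )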
if __name__ == "__main__":
import doctest
doctest.testmod()
| 188 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None ):
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_distrib_moves , coins_distrib_excess )
    return get_distrib(root )[0]
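# Worked example (added for illustration): distributing the coins of the tree
# [3, 0, 0] so that every node ends with one coin takes two moves.
assert distribute_coins(TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) ) ) == 2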
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory( args: Namespace ) -> str:
    '''simple docstring'''
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand ):
    """simple docstring"""
@staticmethod
    def register_subcommand( parser : ArgumentParser )-> Union[str, Any]:
        '''simple docstring'''
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing',action='store_true',help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file',type=str,help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path',type=str,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self,testing : bool,testing_file : str,path : int=None,*args : List[Any] )-> List[str]:
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]]
        if len(directories ) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file,'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ),no_input=True,extra_context=testing_configuration,)
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json','r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir,exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}',exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py','w' ):
pass
shutil.move(
F'{directory}/__init__.py',F'{model_dir}/__init__.py',)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py',F'{model_dir}/configuration_{lowercase_model_name}.py',)
        def remove_copy_lines(path : List[str] ):
            with open(path,'r' ) as f:
                lines = f.readlines()
            with open(path,'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py',F'{model_dir}/modeling_{lowercase_model_name}.py',)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py',F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py',)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py',F'{model_dir}/modeling_tf_{lowercase_model_name}.py',)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py',F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py',)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py',F'{model_dir}/modeling_flax_{lowercase_model_name}.py',)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py',F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py',)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md',F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md',)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py',F'{model_dir}/tokenization_{lowercase_model_name}.py',)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py',F'{model_dir}/tokenization_{lowercase_model_name}_fast.py',)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file : str,line_to_copy_below : str,lines_to_copy : List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh,'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file,abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path,original_file )
        def skip_units(line : Union[str, Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile : str ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
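        # Example invocations (illustrative):
        #   transformers-cli add-new-model
        #   transformers-cli add-new-model --testing --testing_file config.json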
| 716 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 586 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
    '''simple docstring'''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size( exception: Exception ):
    '''simple docstring'''
    _statements = [
        'CUDA out of memory.',  # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.',  # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory',  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size( function: callable = None , starting_batch_size: int = 1_28 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args : Optional[int] , **kwargs : Any ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ', '.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
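# Hedged usage sketch (added for illustration): the wrapped function receives
# the current batch size as its first argument and is retried with half the
# batch size whenever an out-of-memory error is recognised.
@find_executable_batch_size(starting_batch_size=8 )
def _demo_step(batch_size ):
    return batch_size
assert _demo_step() == 8  # succeeds immediately, so the start size is kept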
| 70 |
'''simple docstring'''
from math import log2
def get_index_of_rightmost_set_bit( number : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 390 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self ):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<s>")
        self.assertEqual(vocab_keys[1] , "<pad>")
        self.assertEqual(len(vocab_keys) , 1008)
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1008)
    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name)
            tokenizer = XGLMTokenizer(f.name , keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
    @slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
def __snake_case ( self : Any):
a : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
a : List[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
a : Tuple = {
"input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name="facebook/xglm-564M" , padding=False , )
| 135 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowercase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
__lowercase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
__lowercase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
__lowercase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
__lowercase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string")),
"references": datasets.Value("string"),
}) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        if os.getenv("HF_ALLOW_CODE_EVAL" , 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k )-> Dict:
    '''simple docstring'''
    def estimator(n : int , c : int , k : int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
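# Worked example (added for illustration): with n=2 samples and c=1 correct,
# pass@1 = 1 - C(1,1)/C(2,1) = 0.5 and pass@2 = 1.0.
assert float(estimate_pass_at_k([2] , [1] , 1 )[0] ) == 0.5
assert float(estimate_pass_at_k([2] , [1] , 2 )[0] ) == 1.0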
| 135 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp (self ) -> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id (self ) -> None:
        '''simple docstring'''
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab (self ) -> None:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1012 )
    def test_vocab_size (self ) -> None:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer (self ) -> None:
        '''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer (self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
    def test_tokenization_base_easy_symbols (self ) -> None:
'''simple docstring'''
__a : Optional[int] = "Hello World!"
__a : Optional[int] = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
    def test_tokenizer_integration (self ) -> None:
'''simple docstring'''
__a : Optional[int] = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 351 |
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main () -> None:
    """simple docstring"""
    message = input("Enter message: " )
    key = input("Enter key [alphanumeric]: " )
    mode = input("Encrypt/Decrypt [e/d]: " )
    if mode.lower().startswith("e" ):
        mode = "encrypt"
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        mode = "decrypt"
        translated = decrypt_message(key , message )
    print(f'\n{mode.title()}ed message:' )
    print(translated )
def encrypt_message (key , message ) -> str:
    """simple docstring"""
    return translate_message(key , message , "encrypt" )
def decrypt_message (key , message ) -> str:
    """simple docstring"""
    return translate_message(key , message , "decrypt" )
def translate_message (key , message , mode ) -> str:
    """simple docstring"""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 351 | 1 |
def solution ( n = 1_00 ):
    """simple docstring"""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
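# Worked example (the classic Project Euler 29 case): for n = 5 the terms a**b with
# 2 <= a, b <= 5 give 16 products, but 2**4 == 4**2 == 16 collide, so
#     solution(5)  # -> 15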
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 706 |
def set_bit ( number , position ):
    """simple docstring"""
    return number | (1 << position)
def clear_bit ( number , position ):
    """simple docstring"""
    return number & ~(1 << position)
def flip_bit ( number , position ):
    """simple docstring"""
    return number ^ (1 << position)
def is_bit_set ( number , position ):
    """simple docstring"""
    return ((number >> position) & 1) == 1
def get_bit ( number , position ):
    """simple docstring"""
    return int((number & (1 << position)) != 0 )
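# Illustrative usage of the helpers above (function names restored here from the
# bodies of the five clashing definitions; 0b1010 == 10):
#     set_bit(10, 0)     # -> 11  (0b1011)
#     clear_bit(10, 1)   # -> 8   (0b1000)
#     flip_bit(10, 2)    # -> 14  (0b1110)
#     is_bit_set(10, 3)  # -> True
#     get_bit(10, 2)     # -> 0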
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = '''▁'''
class BigBirdTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
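    # Resulting layout (BERT-style): single sequence -> `[CLS] token_ids_0 [SEP]`,
    # pair of sequences -> `[CLS] token_ids_0 [SEP] token_ids_1 [SEP]`.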
    def get_special_tokens_mask ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,) | 646 | """simple docstring"""
import math
def sieve ( n: int ) -> list[int]:
    '''Segmented Sieve of Eratosthenes: returns all primes up to ``n``.'''
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
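# Quick sanity check (illustrative):
#     sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]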
print(sieve(10**6))
| 646 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool ( PipelineTool ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
    def setup ( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode ( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward ( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode ( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 55 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict ( state_dict ):
    '''simple docstring'''
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('emb.' ):
            name = name.replace('emb.' , 'embeddings.' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0' ):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , name )
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k' ):
            name = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v' ):
            name = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r' ):
            name = name.replace('.time_mix_r' , '.time_mix_receptance' )
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
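# Example of the renaming performed above (illustrative key):
#     'blocks.0.att.time_mix_k'  ->  'rwkv.blocks.0.attention.time_mix_key'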
def convert_rmkv_checkpoint_to_hf_format ( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    '''simple docstring'''
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
        vocab_size = 5_02_77
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='cpu' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='2GB' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 70 |
def __lowerCAmelCase ( __magic_name__ ):
    if not isinstance(__magic_name__ , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(__magic_name__ ) )
        num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 226 | 0 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
"""simple docstring"""
    def __init__(self: Union[str, Any] , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_mobilebert_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor ( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head ( self ):
        """simple docstring"""
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
                    [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
                    [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 329 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open ( *args , **kwargs ):
            """simple docstring"""
            pass
def hashimage ( image ):
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
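# `hashimage` reduces a PIL image to a 32-character MD5 hex digest so test outputs
# can be compared cheaply by value instead of pixel-by-pixel.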
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests ( unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline ( self , model , tokenizer , processor ):
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test ( self , depth_estimator , examples ):
        """simple docstring"""
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "Intel/dpt-large"
__UpperCAmelCase : Optional[int] = pipeline("depth-estimation" , model=UpperCAmelCase_ )
__UpperCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__UpperCAmelCase : str = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt ( self ):
"""simple docstring"""
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 329 | 1 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
SCREAMING_SNAKE_CASE = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 199 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , out_channels: int , kernel_size: int = 3 , stride: int = 1 , groups: int = 1 , activation: Optional[str] = "relu" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call ( self , hidden_state ):
        '''simple docstring'''
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , config: RegNetConfig , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
    def call ( self , pixel_values ):
        '''simple docstring'''
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , out_channels: int , stride: int = 2 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="convolution" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
    def call ( self , inputs: tf.Tensor , training: bool = False ):
        '''simple docstring'''
        return self.normalization(self.convolution(inputs ) , training=training )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , in_channels: int , reduced_channels: int , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]
    def call ( self , hidden_state ):
        '''simple docstring'''
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
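# The layer above is a squeeze-and-excitation block: global-average-pool to
# per-channel statistics, pass them through a ReLU bottleneck and a sigmoid gate,
# then rescale the input channels by the gated values.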
class TFRegNetXLayer ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.2" ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call ( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.3" ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call ( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name="layers.0" ),
            *[layer(config , out_channels , out_channels , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]
    def call ( self , hidden_state ):
        '''simple docstring'''
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , config: RegNetConfig , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=F"""stages.{i+1}""" ) )
    def call ( self , hidden_state: tf.Tensor , output_hidden_states: bool = False , return_dict: bool = True ):
        '''simple docstring'''
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
    '''simple docstring'''
    config_class = RegNetConfig
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name="embedder" )
        self.encoder = TFRegNetEncoder(config , name="encoder" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )
    @unpack_inputs
    def call ( self , pixel_values: tf.Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , training: bool = False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase_ ( A ):
'''simple docstring'''
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature( self ):
        '''simple docstring'''
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.float32 )}
SCREAMING_SNAKE_CASE = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
class UpperCAmelCase_ ( A ):
'''simple docstring'''
    def __init__( self , config: RegNetConfig , *inputs , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call( self , pixel_values: tf.Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , training: bool = False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
class UpperCAmelCase_ ( A , A ):
'''simple docstring'''
    def __init__( self , config: RegNetConfig , *inputs , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call( self , pixel_values: tf.Tensor = None , labels: tf.Tensor = None , output_hidden_states: bool = None , return_dict: bool = None , training: bool = False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
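
# Hedged usage sketch (added for illustration; assumes the restored names above
# and a model built from a RegNetConfig -- upstream these are transformers'
# TFRegNetModel / TFRegNetForImageClassification):
#
#   import tensorflow as tf
#   pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, per the docstring
#   logits = model(pixel_values).logits                  # (1, num_labels)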
| 199 | 1 |
import argparse
import struct
import unittest
class SHAaaa :
"""simple docstring"""
    def __init__( self , data ):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ):
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sa = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sa + maj) % 0x1_00_00_00_00
                # rotate the eight working variables for the next round
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ):
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class _a ( unittest.TestCase ):
"""simple docstring"""
    def test_match_hashes( self ):
        import hashlib

        msg = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def __lowerCamelCase ( ) -> Optional[int]:
import doctest
doctest.testmod()
_lowercase =argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
_lowercase =parser.parse_args()
_lowercase =args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
_lowercase =f.read()
else:
_lowercase =bytes(_lowerCamelCase , "utf-8" )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 716 | import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
"""simple docstring"""
    feature_extraction_class = None
    def test_feat_extract_to_json_string( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 594 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
"""simple docstring"""
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = [parquet_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str=("train",) ):
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if split:
__UpperCAmelCase = {split: parquet_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': parquet_path, '''test''': parquet_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' )
__UpperCAmelCase = {'''image''': [image_path]}
__UpperCAmelCase = Features({'''image''': Image()} )
__UpperCAmelCase = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
__UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__UpperCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
"""simple docstring"""
assert get_writer_batch_size(UpperCamelCase__ ) == expected
| 262 | '''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator( iterations: int ):
    """simple docstring"""
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""" )
    print(f"""The math module's value of pi is {pi}""" )
    print(f"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator( iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ):
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check( iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ):
    """simple docstring"""
    def identity_function(x: float ) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print('''******************''' )
def pi_estimator_using_area_under_curve( iterations: int ):
    """simple docstring"""
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
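    # Hedged usage sketch (added; assumes the restored function names above).
    # Monte Carlo error shrinks roughly like 1/sqrt(iterations):
    #   pi_estimator(100_000)
    #   area_under_line_estimator_check(100_000)
    #   pi_estimator_using_area_under_curve(100_000)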
| 262 | 1 |
lowercase__ ={
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal( decimal: float ) -> str:
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 1_6 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
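
# Hedged examples (added for illustration; the module calls doctest.testmod()
# below but the original carried no doctests):
#   decimal_to_hexadecimal(5)    -> '0x5'
#   decimal_to_hexadecimal(15)   -> '0xf'
#   decimal_to_hexadecimal(-256) -> '-0x100'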
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase__ =logging.get_logger(__name__)
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = ["input_features", "attention_mask"]
def __init__(self : Dict , snake_case_ : Tuple=8_0 , snake_case_ : Tuple=1_6_0_0_0 , snake_case_ : Union[str, Any]=8_0 , snake_case_ : List[Any]=0.0 , snake_case_ : Optional[Any]=True , snake_case_ : Any=True , snake_case_ : int=True , **snake_case_ : Dict , ):
super().__init__(feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ )
__a : int = num_mel_bins
__a : Dict = do_ceptral_normalize
__a : Union[str, Any] = normalize_means
__a : int = normalize_vars
__a : Optional[Any] = True
def lowerCAmelCase (self : Any , snake_case_ : np.ndarray , ):
__a : Union[str, Any] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
__a : Any = torch.from_numpy(snake_case_ ).unsqueeze(0 )
__a : List[Any] = ta_kaldi.fbank(snake_case_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase (snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : Optional[bool] = True , snake_case_ : Optional[bool] = True , snake_case_ : float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
__a : Optional[int] = x[:input_length].mean(axis=0 )
__a : Optional[int] = np.subtract(snake_case_ , snake_case_ )
if normalize_vars:
__a : Optional[Any] = x[:input_length].std(axis=0 )
__a : Optional[Any] = np.divide(snake_case_ , snake_case_ )
if input_length < x.shape[0]:
__a : Optional[int] = padding_value
# make sure array is in float32
__a : Tuple = x.astype(np.floataa )
return x
def lowerCAmelCase (self : List[Any] , snake_case_ : List[np.ndarray] , snake_case_ : Optional[np.ndarray] = None ):
__a : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case_ , snake_case_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(snake_case_ , snake_case_ )
]
def __call__(self : List[str] , snake_case_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Optional[int] = None , snake_case_ : bool = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , **snake_case_ : int , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a : Dict = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
__a : List[str] = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Optional[int] = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
__a : Optional[int] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Dict = [raw_speech]
# extract fbank features
__a : Union[str, Any] = [self._extract_fbank_features(snake_case_ ) for waveform in raw_speech]
# convert into correct format for padding
__a : str = BatchFeature({'''input_features''': features} )
__a : Union[str, Any] = self.pad(
snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
# make sure list is in array format
__a : List[Any] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , snake_case_ ):
__a : List[str] = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_features]
__a : Tuple = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(snake_case_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__a : int = (
np.array(snake_case_ , dtype=np.intaa )
if self._get_padding_strategies(snake_case_ , max_length=snake_case_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__a : List[str] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=snake_case_ )
if return_tensors is not None:
__a : Optional[int] = padded_inputs.convert_to_tensors(snake_case_ )
return padded_inputs
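
# Hedged usage sketch (added; not part of the original file). This extractor
# appears to correspond to transformers' Speech2TextFeatureExtractor; given a
# 16 kHz mono waveform it returns padded log-mel "input_features" and an
# "attention_mask":
#
#   import numpy as np
#   from transformers import Speech2TextFeatureExtractor  # upstream name, assumed
#   fe = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16_000, num_mel_bins=80)
#   batch = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   print(batch["input_features"].shape)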
| 326 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( number: int ) -> int:
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
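
# Hedged illustration (added; values follow the standard Möbius definition):
#   mobius(6)  -> 1    6 = 2 * 3, an even number of distinct prime factors
#   mobius(30) -> -1   30 = 2 * 3 * 5, an odd number of distinct prime factors
#   mobius(12) -> 0    12 = 2 * 2 * 3 is not square-free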
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Tuple:
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
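
# Hedged worked example (added): a box (10, 20, 30, 40) on a 200x400 image maps
# into the 0-1000 coordinate space these models expect:
#   normalize_box([10, 20, 30, 40], 200, 400) -> [50, 50, 150, 100]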
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) ->List[Any]:
lowercase_ = tesseract_config if tesseract_config is not None else """"""
# apply OCR
lowercase_ = to_pil_image(SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ = pil_image.size
lowercase_ = pytesseract.image_to_data(SCREAMING_SNAKE_CASE_ , lang=SCREAMING_SNAKE_CASE_ , output_type="""dict""" , config=SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
lowercase_ = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if not word.strip()]
lowercase_ = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
lowercase_ = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
lowercase_ = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
lowercase_ = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
lowercase_ = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase_ = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE_ )
# finally, normalize the bounding boxes
lowercase_ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( __a ):
"""simple docstring"""
A_ = ['''pixel_values''']
def __init__( self : Tuple , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = "" , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(**lowercase_ )
lowercase_ = size if size is not None else {"""height""": 224, """width""": 224}
lowercase_ = get_size_dict(lowercase_ )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = apply_ocr
lowercase_ = ocr_lang
lowercase_ = tesseract_config
def lowerCamelCase__ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ):
'''simple docstring'''
lowercase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowercase_ = (size["""height"""], size["""width"""])
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Dict , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(lowercase_ )
lowercase_ = resample if resample is not None else self.resample
lowercase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(lowercase_ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
lowercase_ = []
lowercase_ = []
for image in images:
lowercase_ , lowercase_ = apply_tesseract(lowercase_ , lowercase_ , lowercase_ )
words_batch.append(lowercase_ )
boxes_batch.append(lowercase_ )
if do_resize:
lowercase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase_ = [flip_channel_order(lowercase_ ) for image in images]
lowercase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase_ = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowercase_ )
if apply_ocr:
lowercase_ = words_batch
lowercase_ = boxes_batch
return data
| 451 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase ( self ):
UpperCAmelCase__ : Dict = self.dummy_uncond_unet
UpperCAmelCase__ : Dict = KarrasVeScheduler()
UpperCAmelCase__ : Any = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ : Dict = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase__ : List[Any] = torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type='''numpy''' , return_dict=_UpperCAmelCase )[0]
UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self ):
UpperCAmelCase__ : Optional[int] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ : List[Any] = KarrasVeScheduler()
UpperCAmelCase__ : int = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = pipe(num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase__ : Any = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 599 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase_ = "<<<<<<< This should probably be modified because it mentions: "
UpperCamelCase_ = "=======\n>>>>>>>\n"
UpperCamelCase_ = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def lowerCAmelCase__ ( a_ : Namespace ) -> str:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" ) | 599 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_lowercase: Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def lowercase_ ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
_lowercase: Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def lowercase_ ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_lowercase: Tuple = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_lowercase: Optional[int] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowercase: Any = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_lowercase: Any = DDPMScheduler()
_lowercase: Tuple = AudioDiffusionPipeline(vqvae=A_ , unet=self.dummy_unet , mel=A_ , scheduler=A_ )
_lowercase: List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowercase: Tuple = torch.Generator(device=A_ ).manual_seed(42 )
_lowercase: Dict = pipe(generator=A_ , steps=4 )
_lowercase: Any = output.audios[0]
_lowercase: List[Any] = output.images[0]
_lowercase: List[Any] = torch.Generator(device=A_ ).manual_seed(42 )
_lowercase: str = pipe(generator=A_ , steps=4 , return_dict=A_ )
_lowercase: Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_lowercase: Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_lowercase: Dict = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_lowercase: Tuple = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_lowercase: List[Any] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_lowercase: Dict = DDIMScheduler()
_lowercase: Optional[Any] = self.dummy_vqvae_and_unet
_lowercase: Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A_ , scheduler=A_ )
_lowercase: int = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
_lowercase: Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_lowercase: Union[str, Any] = torch.Generator(device=A_ ).manual_seed(42 )
_lowercase: List[Any] = pipe(raw_audio=A_ , generator=A_ , start_step=5 , steps=10 )
_lowercase: Optional[Any] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_lowercase: List[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_lowercase: Optional[int] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_lowercase: Union[str, Any] = self.dummy_unet_condition
_lowercase: str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=A_ , mel=A_ , scheduler=A_ )
_lowercase: Union[str, Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
_lowercase: Union[str, Any] = torch.rand((1, 1, 10) )
_lowercase: Optional[Any] = pipe(generator=A_ , encoding=A_ )
_lowercase: str = output.images[0]
_lowercase: Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_lowercase: Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Dict = torch_device
_lowercase: List[Any] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_lowercase: Dict = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowercase: Tuple = torch.Generator(device=A_ ).manual_seed(42 )
_lowercase: Union[str, Any] = pipe(generator=A_ )
_lowercase: str = output.audios[0]
_lowercase: Union[str, Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_lowercase: Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_lowercase: Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 353 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter: int ) -> typing.Counter[int]:
    """simple docstring"""
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
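
# Hedged illustration (added): up to perimeter 12 the only Pythagorean triple is
# (3, 4, 5), so pythagorean_triple(12) == Counter({12: 1}).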
def solution( max_perimeter: int = 1_000 ) -> int:
    """simple docstring"""
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 353 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = "arrow" , **UpperCamelCase_ , ):
super().__init__(
split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : str = load_from_cache_file
__UpperCAmelCase : Dict = file_format
__UpperCAmelCase : Union[str, Any] = Spark(
df=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , working_dir=UpperCamelCase_ , **UpperCamelCase_ , )
    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
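
# Hedged usage sketch (added; assumes a live SparkSession and that the reader
# above is datasets' SparkDatasetReader, its upstream name):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/spark_cache").read()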
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: int = """table-transformer"""
SCREAMING_SNAKE_CASE_: int = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_: int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.get('model_type' )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
dilation , backbone , use_pretrained_backbone = None, None, None
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.dilation = dilation
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self.d_model
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Tuple = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return 1E-5
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return 12
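# Usage sketch (hedged: upstream this configuration class is
# TableTransformerConfig; here it is bound to the mangled name `snake_case_`).
# The `attribute_map` above aliases `hidden_size` to `d_model`:
#
#   cfg = TableTransformerConfig(d_model=512)
#   assert cfg.hidden_size == 512   # resolved through attribute_map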
| 260 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class snake_case_ :
"""simple docstring"""
def __init__( self , config_file_or_dict ):
"""simple docstring"""
if isinstance(config_file_or_dict , dict ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
config = deepcopy(config_file_or_dict )
elif os.path.exists(config_file_or_dict ):
with io.open(config_file_or_dict , 'r' , encoding='utf-8' ) as f:
config = json.load(f )
else:
try:
config_decoded = baseaa.urlsafe_baadecode(config_file_or_dict ).decode('utf-8' )
config = json.loads(config_decoded )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
self.config = config
self.set_stage_and_offload()
def set_stage_and_offload( self ):
"""simple docstring"""
self._stage = self.get_value('zero_optimization.stage' , -1 )
# offload
self._offload = False
if self.is_zero2() or self.is_zero3():
offload_devices_valid = set(['cpu', 'nvme'] )
offload_devices = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
self._offload = True
def find_config_node( self , ds_key_long ):
"""simple docstring"""
config = self.config
# find the config node of interest if it exists
nodes = ds_key_long.split('.' )
ds_key = nodes.pop()
for node in nodes:
config = config.get(node )
if config is None:
return None, ds_key
return config, ds_key
def get_value( self , ds_key_long , default=None ):
"""simple docstring"""
config , ds_key = self.find_config_node(ds_key_long )
if config is None:
return default
return config.get(ds_key , default )
def del_config_sub_tree( self , ds_key_long , must_exist=False ):
"""simple docstring"""
config = self.config
# find the config node of interest if it exists
nodes = ds_key_long.split('.' )
for node in nodes:
parent_config = config
config = config.get(node )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(node )
def is_true( self , ds_key_long ):
"""simple docstring"""
value = self.get_value(ds_key_long )
return False if value is None else bool(value )
def is_false( self , ds_key_long ):
"""simple docstring"""
value = self.get_value(ds_key_long )
return False if value is None else not bool(value )
def is_zero2( self ):
"""simple docstring"""
return self._stage == 2
def is_zero3( self ):
"""simple docstring"""
return self._stage == 3
def is_offload( self ):
"""simple docstring"""
return self._offload
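# Standalone sketch of the dotted-key lookup that find_config_node/get_value
# above implement (illustrative re-implementation, not the upstream code):
def _get_value_sketch(config: dict, ds_key_long: str, default=None):
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)
assert _get_value_sketch({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") == 3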
class snake_case_ :
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
self.engine = engine
def _UpperCAmelCase ( self , loss , **kwargs ):
"""simple docstring"""
self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
super().__init__(__a , device_placement=False , scaler=None )
self.__has_overflow__ = hasattr(self.optimizer , 'overflow' )
def _UpperCAmelCase ( self , __a=None ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , scheduler , optimizers ):
"""simple docstring"""
super().__init__(scheduler , optimizers )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class snake_case_ :
"""simple docstring"""
def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
"""simple docstring"""
self.params = params
self.lr = lr
self.weight_decay = weight_decay
self.kwargs = kwargs
class snake_case_ :
"""simple docstring"""
def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
"""simple docstring"""
self.optimizer = optimizer
self.total_num_steps = total_num_steps
self.warmup_num_steps = warmup_num_steps
self.kwargs = kwargs
| 260 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase( SCREAMING_SNAKE_CASE_ ):
snake_case_ : int = (UniPCMultistepScheduler,)
snake_case_ : Optional[int] = (("""num_inference_steps""", 25),)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
'''simple docstring'''
config = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , time_step=0 , **config ) -> Optional[Any]:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("num_inference_steps" , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output , new_output = sample, sample
for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(output , t , sample , **kwargs ).prev_sample
new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self : List[str] , time_step=0 , **forward_kwargs ) -> Optional[Any]:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("num_inference_steps" , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , scheduler=None , **config ) -> str:
'''simple docstring'''
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1_0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("num_inference_steps" , None )
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
scheduler.set_timesteps(num_inference_steps )
elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
self.assertEqual(output_0.shape , sample.shape )
self.assertEqual(output_0.shape , output_1.shape )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
'''simple docstring'''
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
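# Minimal standalone usage sketch for UniPCMultistepScheduler (illustrative;
# mirrors the full_loop helper above, with a constant stand-in for the model):
#
#   scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = 0.1 * sample            # stand-in for a UNet forward pass
#       sample = scheduler.step(residual, t, sample).prev_sample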
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
'''simple docstring'''
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
sample = self.full_loop(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
'''simple docstring'''
__snake_case = self.full_loop(prediction_type="v_prediction" )
__snake_case = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1_0
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
assert sample.dtype == torch.floataa
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ) -> str:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 711 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( _lowerCAmelCase = "" ) -> dict[str, float]:
'''simple docstring'''
url = _lowerCAmelCase or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
titles = soup.find_all("td" , attrs="titleColumn" )
ratings = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(titles , ratings )
}
}
def _lowerCAmelCase ( _lowerCAmelCase = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
movies = get_imdb_top_aaa_movies()
with open(_lowerCAmelCase , "w" , newline="" ) as out_file:
writer = csv.writer(out_file )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
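# Note: find_all("td", attrs="titleColumn") filters on the CSS class, so it is
# effectively equivalent to soup.find_all("td", class_="titleColumn").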
| 473 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = XLMProphetNetTokenizer
_UpperCAmelCase = False
_UpperCAmelCase = True
def UpperCamelCase ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMProphetNetTokenizer(_lowercase , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ) -> Optional[Any]:
token = '''[PAD]'''
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase ( self ) -> Tuple:
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(vocab_keys ) , 10_12 )
def UpperCamelCase ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def UpperCamelCase ( self ) -> List[Any]:
tokenizer = XLMProphetNetTokenizer(_lowercase , keep_accents=True )
tokens = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def UpperCamelCase ( self ) -> Union[str, Any]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def UpperCamelCase ( self ) -> List[Any]:
text = '''Hello World!'''
expected_ids = [3_53_89, 66_72, 49, 2]
self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def UpperCamelCase ( self ) -> int:
# fmt: off
snake_case = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
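# Note: SentencePiece marks word boundaries with the meta symbol "▁"
# (SPIECE_UNDERLINE), which is why the expected word-initial pieces above
# carry it as a prefix, e.g. "▁This".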
| 342 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , scope=None , range_bbox=10_00 , ) -> int:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def UpperCamelCase ( self ) -> List[Any]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal: LiLT expects (x0, y0, x1, y1) with x0 <= x1 and
# y0 <= y1, so out-of-order random coordinates are swapped into place.
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase ( self ) -> Dict:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> Union[str, Any]:
model = LiltModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> Optional[Any]:
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> List[str]:
model = LiltForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self ) -> str:
snake_case = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( __a , __a , __a , unittest.TestCase ):
_UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
return True
def UpperCamelCase ( self ) -> int:
self.model_tester = LiltModelTester(self )
self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase ( self ) -> str:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase ( self ) -> List[str]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def UpperCamelCase ( self ) -> Dict:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def UpperCamelCase ( self ) -> Dict:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@slow
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self ) -> List[Any]:
model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
input_ids = torch.tensor([[1, 2]] , device=torch_device )
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids , bbox=bbox )
expected_shape = torch.Size([1, 2, 7_68] )
expected_slice = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=torch_device , )
self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 342 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( A ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def A_ ( self : Optional[int] ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Dict ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A_ ( self : str , result ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def A_ ( self : Optional[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaModel(config=config )
model.to(torch_device )
model.eval()
sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def A_ ( self : List[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : int , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DebertaVaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(result )
def A_ ( self : int , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DebertaVaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : int , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Union[str, Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Any ):
UpperCamelCase__ = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : str = True
_A : int = False
_A : Optional[Any] = False
_A : Optional[Any] = False
_A : List[str] = False
def A_ ( self : Dict ):
self.model_tester = DebertaVaModelTester(self )
self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def A_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs )
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
def A_ ( self : Union[str, Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
def A_ ( self : Optional[Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
@slow
def A_ ( self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def A_ ( self : List[str] ):
pass
@slow
def A_ ( self : Union[str, Any] ):
model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 591 | def solution ( n : int = 100 ):
'''simple docstring'''
sum_cubes = (n * (n + 1) // 2) ** 2
sum_squares = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
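# Worked example (Project Euler 6): for n = 10, (1 + 2 + ... + 10)^2 = 3025 and
# 1^2 + 2^2 + ... + 10^2 = 385, so solution(10) -> 2640.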
if __name__ == "__main__":
print(f'{solution() = }')
| 591 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage ( image ) -> str:
'''simple docstring'''
m = hashlib.mda(image.tobytes() )
return m.hexdigest()
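# Usage sketch: hashimage fingerprints a PIL image by hashing its raw pixel
# bytes, which makes image outputs comparable in tests, e.g.
#   hashimage(Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ))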
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , model : Tuple , tokenizer : Any , processor : List[str] ):
depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , depth_estimator : int , examples : int ):
outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
import datasets
dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
model_id = '''Intel/dpt-large'''
depth_estimator = pipeline('''depth-estimation''' , model=model_id )
outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
outputs_hash = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory ( _args ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
download_parser = parser.add_parser('''env''' )
download_parser.set_defaults(func=info_command_factory )
def lowercase__ ( self : List[Any] ):
hub_version = huggingface_hub.__version__
pt_version = '''not installed'''
pt_cuda_available = '''NA'''
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
transformers_version = '''not installed'''
if is_transformers_available():
import transformers
transformers_version = transformers.__version__
accelerate_version = '''not installed'''
if is_accelerate_available():
import accelerate
accelerate_version = accelerate.__version__
xformers_version = '''not installed'''
if is_xformers_available():
import xformers
xformers_version = xformers.__version__
info = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(info ) )
return info
@staticmethod
def lowercase__ ( d : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime ( number ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums ( snake_case__ ):
str_num = str(snake_case__ )
list_nums = [snake_case__]
for i in range(1 , len(str_num ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def validate ( snake_case__ ):
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def compute_truncated_primes ( count = 11 ):
list_truncated_primes: list[int] = []
num = 13
while len(list_truncated_primes ) != count:
if validate(num ):
list_nums = list_truncated_nums(num )
if all(is_prime(i ) for i in list_nums ):
list_truncated_primes.append(num )
num += 2
return list_truncated_primes
def __UpperCamelCase ( ):
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(11)) = }')
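# Worked example (illustrative): 3797 is truncatable from both directions:
#   list_truncated_nums(3797) -> [3797, 797, 379, 97, 37, 7, 3]
# and every entry passes is_prime, so 3797 is among the eleven primes summed above.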
| 480 |
"""simple docstring"""
import math
def __UpperCamelCase ( apparent_power , power_factor ):
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def __UpperCamelCase ( apparent_power , power_factor ):
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
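# Worked example (hedged: upstream these two functions are real_power and
# reactive_power; here both carry the same mangled name). For S = 100 VA and
# pf = 0.9:
#   P = S * pf             -> 100 * 0.9 = 90.0
#   Q = S * sqrt(1 - pf^2) -> 100 * sqrt(1 - 0.81) ~= 43.59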
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : torch.FloatTensor
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self :str , SCREAMING_SNAKE_CASE :int = 6_5_5_3_6 , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :str = "fourier" , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :float = 0.0 , SCREAMING_SNAKE_CASE :Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE :Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE :Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :Tuple[int] = (3_2, 3_2, 6_4) , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :int = 8 , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :bool = False , ) -> str:
'''simple docstring'''
super().__init__()
_a : Union[str, Any] =sample_size
# time
if time_embedding_type == "fourier":
_a : Any =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE , log=SCREAMING_SNAKE_CASE , flip_sin_to_cos=SCREAMING_SNAKE_CASE )
_a : Dict =2 * block_out_channels[0]
elif time_embedding_type == "positional":
_a : str =Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE , downscale_freq_shift=SCREAMING_SNAKE_CASE )
_a : int =block_out_channels[0]
if use_timestep_embedding:
_a : Optional[Any] =block_out_channels[0] * 4
_a : Union[str, Any] =TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE , time_embed_dim=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , out_dim=block_out_channels[0] , )
_a : Dict =nn.ModuleList([] )
_a : Dict =None
_a : int =nn.ModuleList([] )
_a : Any =None
# down
_a : str =in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
_a : Tuple =output_channel
_a : Dict =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_a : int =i == len(SCREAMING_SNAKE_CASE ) - 1
_a : Optional[Any] =get_down_block(
SCREAMING_SNAKE_CASE , num_layers=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
_a : List[Any] =get_mid_block(
SCREAMING_SNAKE_CASE , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE , add_downsample=SCREAMING_SNAKE_CASE , )
# up
_a : str =list(reversed(SCREAMING_SNAKE_CASE ) )
_a : List[Any] =reversed_block_out_channels[0]
if out_block_type is None:
_a : List[str] =out_channels
else:
_a : List[str] =block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
_a : Dict =output_channel
_a : Optional[int] =(
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE ) - 1 else final_upsample_channels
)
_a : Dict =i == len(SCREAMING_SNAKE_CASE ) - 1
_a : Union[str, Any] =get_up_block(
SCREAMING_SNAKE_CASE , num_layers=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
_a : Optional[int] =output_channel
# out
_a : str =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
_a : Union[str, Any] =get_out_block(
out_block_type=SCREAMING_SNAKE_CASE , num_groups_out=SCREAMING_SNAKE_CASE , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
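    # Hedged usage sketch (not part of the original file): assuming this is diffusers'
    # `UNet1DModel` (the class header sits above this excerpt), a forward pass would be:
    #
    #   model = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
    #   noisy = torch.randn(1, 2, 256)               # (batch, channels, length)
    #   denoised = model(noisy, timestep=10).sample  # same shape as `noisy`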
| 694 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
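
# Illustrative sanity checks for the helpers above (values chosen for this sketch,
# not part of the original file):
assert set_bit(0b1101, 1) == 0b1111
assert clear_bit(0b1101, 2) == 0b1001
assert flip_bit(0b1101, 1) == 0b1111
assert is_bit_set(0b1010, 3) is True
assert get_bit(0b1010, 0) == 0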
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed): 1, 1, 2, 5, 14, ..."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
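
# Illustrative check (not in the original file): the first five Catalan numbers.
print([catalan(n) for n in range(1, 6)])  # -> [1, 1, 2, 5, 14]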
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
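
# Hedged usage sketch (not part of the original file): assuming this is diffusers'
# KDPM2DiscreteScheduler, a bare denoising loop looks roughly like:
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # `model` is any epsilon-predicting UNet
#       sample = scheduler.step(noise_pred, t, sample).prev_sample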
| 691 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 95 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 95 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Measure how quickly the point (x, y) escapes under the Mandelbrot iteration."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
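

# Illustrative, tiny render (not in the original file) to keep runtime negligible:
_thumb = get_image(image_width=32, image_height=24, max_step=10)
assert _thumb.size == (32, 24)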
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 321 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
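

# Illustration of the helper under test (mirrors the parametrized expectation above;
# not part of the original test file):
assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]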
| 321 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
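

# Background check (hedged, not in the original file): an m x n grid contains
# T(m) * T(n) rectangles, where T(k) = k * (k + 1) // 2 is the k-th triangle number.
# The 3 x 2 grid from the problem statement gives 6 * 3 = 18 rectangles:
def count_rectangles(m: int, n: int) -> int:
    return (m * (m + 1) // 2) * (n * (n + 1) // 2)


assert count_rectangles(3, 2) == 18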
if __name__ == "__main__":
print(F'''{solution() = }''')
| 277 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 277 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
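
# Hedged example (not in the original file): `feature_size` is derived in __init__
# from the lag features plus the auxiliary features counted above.
_cfg = AutoformerConfig(prediction_length=24)
assert _cfg.feature_size == _cfg.input_size * len(_cfg.lags_sequence) + _cfg._number_of_features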
| 717 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
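

# Hedged example (not in the original file): the cosine schedule helper returns a
# jnp array whose entries are capped at max_beta.
_example_betas = betas_for_alpha_bar(10)
assert float(_example_betas.max()) <= 0.999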
| 467 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
    cluster = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
    cluster = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 22 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, '''schedulers/'''))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/diffusers/schedulers/scheduling_ddpm.py'''),
            os.path.join(self.diffusers_dir, '''schedulers/scheduling_ddpm.py'''),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''',
            '''DDPMSchedulerOutput''',
            REFERENCE_CODE + '''\n''',
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''',
            '''DDPMSchedulerOutput''',
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''',
            '''TestSchedulerOutput''',
            re.sub('''DDPM''', '''Test''', REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}',
            F'{long_class_name}SchedulerOutput',
            re.sub('''Bert''', long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''',
            '''TestSchedulerOutput''',
            REFERENCE_CODE,
            overwrite_result=re.sub('''DDPM''', '''Test''', REFERENCE_CODE),
        )
| 22 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
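
# Hedged example (not in the original file): the default hyperparameters above
# match roberta-base.
_config = RobertaConfig()
assert _config.hidden_size == 768 and _config.num_hidden_layers == 12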
| 710 | '''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg)
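

# Illustrative check (not in the original file): the trivial 1 x 1 tour succeeds;
# a 2 x 2 board has no open knight's tour and would raise ValueError instead.
assert open_knight_tour(1) == [[1]]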
if __name__ == "__main__":
import doctest
doctest.testmod()
| 428 | 0 |
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
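

# Grounded example (not in the original file): 47 reaches a palindrome in one step,
# since 47 + 74 = 121; 196 is the classic candidate that never seems to converge.
assert sum_reverse(47) == 121 and is_palindrome(121)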
if __name__ == "__main__":
print(f"{solution() = }")
| 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}

MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''') for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase = {}
__lowercase = load_spm(self.spm_file , self.sp_model_kwargs )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
__lowercase = Path(__UpperCAmelCase )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
__lowercase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__lowercase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (str(__UpperCAmelCase ), str(__UpperCAmelCase ))
def lowercase__ ( __UpperCamelCase : str , __UpperCamelCase : Dict[str, Any] ):
'''simple docstring'''
__lowercase = sentencepiece.SentencePieceProcessor(**__UpperCamelCase )
spm.Load(str(__UpperCamelCase ) )
return spm
def lowercase__ ( __UpperCamelCase : str ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" ) as f:
return json.load(__UpperCamelCase )
def lowercase__ ( __UpperCamelCase : str , __UpperCamelCase : str ):
'''simple docstring'''
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase , indent=2 )
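# A minimal round-trip sketch of the two JSON helpers above (their original
# names are `save_json` and `load_json`; the obfuscated definitions share one
# placeholder name, so standalone equivalents are spelled out here).
import json
import os
import tempfile

def save_json(data, path):
    with open(path, "w") as f:
        json.dump(data, f, indent=2)

def load_json(path):
    with open(path) as f:
        return json.load(f)

with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_path = os.path.join(tmp_dir, "vocab.json")
    save_json({"<s>": 0, "</s>": 2, "hello": 7}, vocab_path)
    assert load_json(vocab_path) == {"<s>": 0, "</s>": 2, "hello": 7}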
| 566 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_A = logging.get_logger(__name__)
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str
_snake_case : str = None
@staticmethod
def A ( )-> Union[str, Any]:
raise NotImplementedError
def A ( self : Any , A_ : Optional[Any] , A_ : int , A_ : str , **A_ : Union[str, Any] )-> Union[str, Any]:
raise NotImplementedError
def A ( self : List[str] , A_ : str )-> str:
raise NotImplementedError
def A ( self : Any )-> Union[str, Any]:
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def A ( cls : int )-> List[Any]:
return f"""`pip install {cls.pip_package or cls.name}`"""
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Tuple = 'optuna'
@staticmethod
def A ( )-> str:
return is_optuna_available()
def A ( self : int , A_ : str , A_ : int , A_ : str , **A_ : Union[str, Any] )-> List[str]:
return run_hp_search_optuna(A_ , A_ , A_ , **A_ )
def A ( self : str , A_ : Any )-> Optional[Any]:
return default_hp_space_optuna(A_ )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : List[str] = 'ray'
_snake_case : Optional[int] = '\'ray[tune]\''
@staticmethod
def A ( )-> Tuple:
return is_ray_available()
def A ( self : Optional[int] , A_ : Tuple , A_ : int , A_ : str , **A_ : str )-> List[str]:
return run_hp_search_ray(A_ , A_ , A_ , **A_ )
def A ( self : Any , A_ : Union[str, Any] )-> Tuple:
return default_hp_space_ray(A_ )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Tuple = 'sigopt'
@staticmethod
def A ( )-> int:
return is_sigopt_available()
def A ( self : List[Any] , A_ : Optional[int] , A_ : int , A_ : str , **A_ : Optional[int] )-> List[Any]:
return run_hp_search_sigopt(A_ , A_ , A_ , **A_ )
def A ( self : Union[str, Any] , A_ : Dict )-> Union[str, Any]:
return default_hp_space_sigopt(A_ )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Optional[Any] = 'wandb'
@staticmethod
def A ( )-> List[Any]:
return is_wandb_available()
def A ( self : Union[str, Any] , A_ : int , A_ : int , A_ : str , **A_ : Union[str, Any] )-> List[Any]:
return run_hp_search_wandb(A_ , A_ , A_ , **A_ )
def A ( self : Tuple , A_ : Dict )-> Dict:
return default_hp_space_wandb(A_ )
_A = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
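# A minimal standalone sketch of the selection rule implemented by
# `default_hp_search_backend` below: take the first available backend in
# registration order. The names here are illustrative stand-ins, not the
# transformers API.
def pick_backend(backends):
    available = [name for name, ok in backends if ok]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0]

assert pick_backend([("optuna", False), ("ray", True), ("wandb", True)]) == "ray"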
def lowercase () -> str:
'''simple docstring'''
__UpperCamelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_snake_case ) > 0:
__UpperCamelCase = available_backends[0].name
if len(_snake_case ) > 1:
logger.info(
f"""{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 228 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] , A_ : str )-> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
__UpperCamelCase = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def A ( self : Tuple )-> int:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict )-> int:
__UpperCamelCase = "sgugger/tiny-distilbert-classification"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , torchscript=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def A ( self : Optional[Any] )-> Union[str, Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , fpaa=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict )-> Tuple:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
__UpperCamelCase = None
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] )-> str:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def A ( self : List[Any] )-> List[Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple )-> Union[str, Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any )-> List[str]:
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Tuple )-> Optional[int]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(A_ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(A_ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(A_ , "train_time.csv" ) , env_info_csv_file=os.path.join(A_ , "env.csv" ) , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "env.csv" ) ).exists() )
def A ( self : List[Any] )-> str:
__UpperCamelCase = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(A_ : List[str] ):
self.assertTrue(hasattr(A_ , "sequential" ) )
self.assertTrue(hasattr(A_ , "cumulative" ) )
self.assertTrue(hasattr(A_ , "current" ) )
self.assertTrue(hasattr(A_ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , "log.txt" ) , log_print=A_ , trace_memory_line_by_line=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
        self.assertTrue(Path(os.path.join(A_ , "log.txt" ) ).exists() )
| 228 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _A ):
_a : List[str] = ''
_a : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_a : str = None # compression type in fsspec. ex: "gzip"
_a : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , a = "" , a = None , a = None , **a ):
super().__init__(self , **a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case__ : Union[str, Any] =fsspec.open(
a , mode="""rb""" , protocol=a , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case__ : Dict =os.path.basename(self.file.path.split("""::""" )[0] )
snake_case__ : Tuple =(
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
snake_case__ : int =None
@classmethod
def lowercase__ ( cls , a ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(a ).lstrip("""/""" )
def lowercase__ ( self ):
if self.dir_cache is None:
snake_case__ : Any ={**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
snake_case__ : str ={f["""name"""]: f}
def lowercase__ ( self , a ):
return self.file.open().read()
def lowercase__ ( self , a , a = "rb" , a=None , a=True , a=None , **a , ):
snake_case__ : List[Any] =self._strip_protocol(a )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowercase ( _A ):
_a : List[Any] = 'bz2'
_a : Optional[Any] = 'bz2'
_a : int = '.bz2'
class _lowercase ( _A ):
_a : int = 'gzip'
_a : Tuple = 'gzip'
_a : List[str] = '.gz'
class _lowercase ( _A ):
_a : Tuple = 'lz4'
_a : Tuple = 'lz4'
_a : Optional[Any] = '.lz4'
class _lowercase ( _A ):
_a : Any = 'xz'
_a : str = 'xz'
_a : Dict = '.xz'
class _lowercase ( _A ):
_a : str = 'zstd'
_a : Optional[Any] = 'zstd'
_a : List[str] = '.zst'
def __init__( self , a , a = "rb" , a = None , a = None , a = DEFAULT_BLOCK_SIZE , **a , ):
super().__init__(
fo=a , mode=a , target_protocol=a , target_options=a , block_size=a , **a , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case__ : Optional[int] =self.file.__enter__
class _lowercase :
def __init__( self , a ):
snake_case__ : int =file_
def __enter__( self ):
self._file.__enter__()
return self
def __exit__( self , *a , **a ):
self._file.__exit__(*a , **a )
def __iter__( self ):
return iter(self._file )
def lowercase__ ( self ):
return next(self._file )
def __getattr__( self , a ):
return getattr(self._file , a )
def fixed_enter(*a , **a ):
return WrappedFile(_enter(*a , **a ) )
snake_case__ : List[Any] =fixed_enter
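# A minimal usage sketch, assuming the gzip filesystem above is registered with
# fsspec under its protocol name (the real `datasets` package registers these
# at import time); the file path is illustrative.
import gzip

import fsspec

with gzip.open("sample.txt.gz", "wb") as f:  # create a small compressed file
    f.write(b"hello fsspec\n")

# fsspec's `::` URL chaining layers the decompressing filesystem over the local
# one, exposing the archive's single member as a plain file.
with fsspec.open("gzip://sample.txt::file://sample.txt.gz", "rb") as f:
    assert f.read() == b"hello fsspec\n"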
| 385 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
snake_case__ : List[str] =tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I like camembert!")
snake_case__ : Dict =model(a )["""last_hidden_state"""]
snake_case__ : Any =tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , a )
# compare the actual values for a slice.
snake_case__ : str =tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
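# The integration input above is a single batch of ten token ids, which is why
# the expected last-hidden-state shape is (1, 10, 768).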
| 385 | 1 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict=1_3 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=9_9 , UpperCAmelCase_ : List[str]=6_4 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=6_4 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : Any=1_6 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=None , ):
"""simple docstring"""
a : Dict = parent
a : Optional[int] = batch_size
a : str = seq_length
a : Optional[Any] = is_training
a : Union[str, Any] = use_input_mask
a : int = use_token_type_ids
a : List[Any] = use_labels
a : Any = vocab_size
a : Tuple = hidden_size
a : Tuple = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : str = intermediate_size
a : Optional[int] = hidden_act
a : Union[str, Any] = hidden_dropout_prob
a : int = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Any = type_vocab_size
a : Tuple = type_sequence_label_size
a : str = initializer_range
a : Optional[Any] = num_labels
a : str = num_choices
a : Dict = scope
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Dict = None
if self.use_input_mask:
a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
a : List[str] = None
a : int = None
a : List[str] = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
a : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[Any] = MPNetModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_ , UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[int] = MPNetForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : str = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[str] = self.num_labels
a : Optional[Any] = MPNetForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : int = self.num_choices
a : Union[str, Any] = MPNetForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Union[str, Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Tuple = self.num_labels
a : Optional[Any] = MPNetForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Dict = self.prepare_config_and_inputs()
        ((a) , (a) , (a) , (a) , (a) , (a)) = config_and_inputs  # config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
a : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A : int = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Tuple = False
A : str = True
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Any = MPNetModelTester(self)
a : Dict = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[str] = MPNetModel.from_pretrained('microsoft/mpnet-base')
a : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
a : int = model(UpperCAmelCase_)[0]
a : List[Any] = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , UpperCAmelCase_)
a : Dict = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]])
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4))
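# The multiple-choice test above repeats each sequence once per answer choice
# via unsqueeze + expand + contiguous; a minimal standalone check of that
# reshaping (shapes are illustrative):
import torch

batch, num_choices, seq = 2, 4, 7
ids = torch.arange(batch * seq).view(batch, seq)
expanded = ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch, num_choices, seq)
assert torch.equal(expanded[:, 0], ids)  # every choice slot sees the same ids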
| 610 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase :
"""simple docstring"""
A : Optional[int] = None
A : Optional[jnp.ndarray] = None
A : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str):
"""simple docstring"""
return cls()
@dataclass
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : jnp.ndarray
A : jnp.ndarray
A : KarrasVeSchedulerState
class UpperCamelCase ( a_ , a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1_0_0 , UpperCAmelCase_ : float = 1.0_07 , UpperCAmelCase_ : float = 8_0 , UpperCAmelCase_ : float = 0.05 , UpperCAmelCase_ : float = 5_0 , ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : KarrasVeSchedulerState , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple = ()):
"""simple docstring"""
a : str = jnp.arange(0 , UpperCAmelCase_)[::-1].copy()
a : List[Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=UpperCAmelCase_ , schedule=jnp.array(UpperCAmelCase_ , dtype=jnp.floataa) , timesteps=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : KarrasVeSchedulerState , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : random.KeyArray , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
a : Tuple = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1)
else:
a : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
a : Optional[Any] = random.split(UpperCAmelCase_ , num=1)
a : Dict = self.config.s_noise * random.normal(key=UpperCAmelCase_ , shape=sample.shape)
a : List[str] = sigma + gamma * sigma
a : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : KarrasVeSchedulerState , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
a : Dict = sample_hat + sigma_hat * model_output
a : Dict = (sample_hat - pred_original_sample) / sigma_hat
a : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCAmelCase_ , derivative=UpperCAmelCase_ , state=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : KarrasVeSchedulerState , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
a : Union[str, Any] = sample_prev + sigma_prev * model_output
a : str = (sample_prev - pred_original_sample) / sigma_prev
a : str = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCAmelCase_ , derivative=UpperCAmelCase_ , state=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : KarrasVeSchedulerState , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str):
"""simple docstring"""
raise NotImplementedError()
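# A minimal NumPy sketch of the schedule built in `set_timesteps` above: a
# geometric interpolation between sigma_max**2 and sigma_min**2 over the
# reversed timestep indices (sigma_min/sigma_max are the config defaults
# registered above; the step count is illustrative).
import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 5
timesteps = np.arange(num_inference_steps)[::-1]
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (timesteps / (num_inference_steps - 1))
assert np.isclose(schedule[0], sigma_min**2) and np.isclose(schedule[-1], sigma_max**2)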
| 610 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 105 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
UpperCamelCase = self.transformer_dir
shutil.copy(
os.path.join(__magic_name__ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def lowerCamelCase_ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None ):
"""simple docstring"""
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
UpperCamelCase = black.format_str(__magic_name__ , mode=__magic_name__ )
UpperCamelCase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(__magic_name__ , """w""" , newline="""\n""" ) as f:
f.write(__magic_name__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__magic_name__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__magic_name__ )
        with open(__magic_name__ , """r""" ) as f:
            # compare the rewritten file content to the expected result
            self.assertEqual(f.read() , __magic_name__ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __magic_name__ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
# Copy consistency with a really long name
UpperCamelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __magic_name__ , __magic_name__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __magic_name__ , overwrite_result=re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
self.assertFalse(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
        # Check that the converted list still contains the same number of models as README.md.
self.assertTrue(__magic_name__ )
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__magic_name__ , __magic_name__ )
| 386 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( ):
return 1
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ = 200 ):
return two_pound(UpperCamelCase_ )
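# The recursion chain above counts combinations by always spending the largest
# coin first. The classic iterative coin-change DP gives the same count; both
# solution(200) and count_ways(200) return 73682.
def count_ways(target: int) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]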
if __name__ == "__main__":
print(solution(int(input().strip())))
| 706 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__magic_name__ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__magic_name__ = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE = bs[:]
__SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase_ )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE = [chr(UpperCamelCase_ ) for n in cs]
return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) )
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = set()
__SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE = char
return pairs
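# A quick sanity check of the pair extractor above (its original name is
# `get_pairs`; the obfuscated definition shares its name with the byte mapper,
# so an explicit copy is used here):
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}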
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Tuple = VOCAB_FILES_NAMES
__lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
__SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""") as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE = bytes_to_unicode()
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""") as merges_handle:
__SCREAMING_SNAKE_CASE = merges_handle.read().split("""\n""")[1:-1]
__SCREAMING_SNAKE_CASE = [tuple(merge.split()) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case_ ( self):
return len(self.encoder)
def snake_case_ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def snake_case_ ( self , lowerCAmelCase__):
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE = tuple(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""")))
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = bigram
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
while i < len(lowerCAmelCase__):
try:
__SCREAMING_SNAKE_CASE = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__SCREAMING_SNAKE_CASE = tuple(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = new_word
if len(lowerCAmelCase__) == 1:
break
else:
__SCREAMING_SNAKE_CASE = get_pairs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """ """.join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = word
return word
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(""" """))
return bpe_tokens
def snake_case_ ( self , lowerCAmelCase__):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def snake_case_ ( self , lowerCAmelCase__):
return self.decoder.get(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """""".join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + """\n""")
__SCREAMING_SNAKE_CASE = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""")
__SCREAMING_SNAKE_CASE = token_index
writer.write(""" """.join(lowerCAmelCase__) + """\n""")
index += 1
return vocab_file, merge_file
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE = """ """ + text
return (text, kwargs)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
return token_ids_a + [self.eos_token_id]
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done inside Blenderbot
inputs.append(""" """ + text)
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """ """.join(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.encode(lowerCAmelCase__)
if len(lowerCAmelCase__) > self.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
return input_ids
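# Unlike the RoBERTa tokenizer it derives from, `build_inputs_with_special_tokens`
# above only appends EOS and ignores any second segment; a minimal illustration
# with made-up ids:
eos_token_id = 2  # illustrative value
token_ids = [10, 11, 12]
assert token_ids + [eos_token_id] == [10, 11, 12, 2]  # what the method returns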
| 248 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case_ ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase=0.01 , __lowerCAmelCase=1_000 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = p_stop
SCREAMING_SNAKE_CASE_ : Dict = max_length
def __iter__( self ):
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Dict = False
while not stop and count < self.max_length:
yield count
count += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random.random() < self.p_stop
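# With stopping probability `p_stop` checked after every yielded item, the
# stream length is roughly geometric with mean ~1 / p_stop, capped at max_length.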
class snake_case_ ( unittest.TestCase ):
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ):
SCREAMING_SNAKE_CASE_ : int = [
BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
for i in range(2 )
]
SCREAMING_SNAKE_CASE_ : Tuple = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but the number of
# batches is a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size and the number of
# batches is not a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=False ):
random.seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = list(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = [
IterableDatasetShard(
__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
for i in range(__lowerCAmelCase )
]
SCREAMING_SNAKE_CASE_ : List[Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCAmelCase )
iterable_dataset_lists.append(list(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Dict = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE_ : Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE_ : Dict = []
for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
reference += reference
self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = 42
SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
SCREAMING_SNAKE_CASE_ : Tuple = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __A ( self ):
Accelerator()
SCREAMING_SNAKE_CASE_ : Any = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
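# Editor's note: a minimal, dependency-free sketch of the sharding pattern the tests above
# verify -- with even batches, each of `num_processes` shards takes every n-th batch in
# round-robin order. This is an illustration, not the actual BatchSamplerShard implementation.
def round_robin_shards(batches, num_processes):
    return [batches[i::num_processes] for i in range(num_processes)]

# For 24 samples in batches of 3 split across 2 processes, shard 0 gets batches 0, 2, 4, 6
# and shard 1 gets batches 1, 3, 5, 7 -- matching the expected lists in the first test above.
_batches = [list(range(i, i + 3)) for i in range(0, 24, 3)]
assert round_robin_shards(_batches, 2) == [
    [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
    [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]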
| 345 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=None , __lowerCAmelCase=2 , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Any = patch_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scope
SCREAMING_SNAKE_CASE_ : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE_ : List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : List[str] = num_patches + 2
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = DeiTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = DeiTForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = DeiTForMaskedImageModeling(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[int] = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : List[Any] = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__lowerCamelCase : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__lowerCamelCase : int = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : List[str] = False
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = DeiTModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __A ( self ):
pass
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int = model_class(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ : Any = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __A ( self ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : str = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_ : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ : Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = model(**__lowerCAmelCase ).loss
loss.backward()
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : int = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ : Any = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = model(**__lowerCAmelCase ).loss
loss.backward()
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
SCREAMING_SNAKE_CASE_ : Any = problem_type['title']
SCREAMING_SNAKE_CASE_ : List[str] = problem_type['num_labels']
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ : Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE_ : Any = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE_ : int = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
SCREAMING_SNAKE_CASE_ : List[str] = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __A ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[str] = DeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def __A ( self ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : int = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**__lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pixel_values.to(__lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(__lowerCAmelCase )
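# Editor's note: quick sanity check of the sequence-length arithmetic used by the model
# tester above. With its defaults (image_size=30, patch_size=2) a 30x30 image yields
# 225 patches, plus the [CLS] and distillation tokens. Illustrative only.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches + 2 == 227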
| 345 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =DanceDiffusionPipeline
snake_case =UNCONDITIONAL_AUDIO_GENERATION_PARAMS
snake_case =PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
snake_case =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case =False
snake_case =False
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
_UpperCAmelCase =IPNDMScheduler()
_UpperCAmelCase ={
"unet": unet,
"scheduler": scheduler,
}
return components
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
_UpperCAmelCase =torch.manual_seed(_snake_case )
else:
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_UpperCAmelCase ={
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =DanceDiffusionPipeline(**_snake_case )
_UpperCAmelCase =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase =pipe(**_snake_case )
_UpperCAmelCase =output.audios
_UpperCAmelCase =audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase =np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def SCREAMING_SNAKE_CASE ( self ):
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def SCREAMING_SNAKE_CASE ( self ):
return super().test_save_load_optional_components()
@skip_mps
def SCREAMING_SNAKE_CASE ( self ):
return super().test_attention_slicing_forward_pass()
def SCREAMING_SNAKE_CASE ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =torch_device
_UpperCAmelCase =DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
_UpperCAmelCase =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(generator=_snake_case , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase =output.audios
_UpperCAmelCase =audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase =np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =torch_device
_UpperCAmelCase =DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
_UpperCAmelCase =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(generator=_snake_case , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase =output.audios
_UpperCAmelCase =audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase =np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
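# Editor's note: the slow tests above compare against hard-coded slices, which only works
# because seeded generators make the sampled noise deterministic. A minimal illustration
# of that pattern (assumes torch is installed):
import torch
g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))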
| 705 |
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->bool:
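# 1) Construct the failure (longest proper prefix-suffix) array for the pattern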
_UpperCAmelCase =get_failure_array(_lowerCamelCase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase =0, 0 # index into text, pattern
while i < len(_lowerCamelCase ):
if pattern[j] == text[i]:
if j == (len(_lowerCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase =failure[j - 1]
continue
i += 1
return False
def lowerCamelCase__ ( _lowerCamelCase ) ->list[int]:
_UpperCAmelCase =[0]
_UpperCAmelCase =0
_UpperCAmelCase =1
while j < len(_lowerCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase =failure[i - 1]
continue
j += 1
failure.append(_lowerCamelCase )
return failure
if __name__ == "__main__":
# Test 1)
snake_case__ : Dict = 'abc1abc12'
snake_case__ : Tuple = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case__ : int = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
snake_case__ : Tuple = 'ABABX'
snake_case__ : Any = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
snake_case__ : Optional[int] = 'AAAB'
snake_case__ : Optional[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
snake_case__ : int = 'abcdabcy'
snake_case__ : int = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
snake_case__ : str = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
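# Editor's note: worked example for Test 5. failure[i] is the length of the longest proper
# prefix of pattern[: i + 1] that is also its suffix, which is what lets the search resume
# without re-checking characters:
#   pattern: a a b a a b a a a
#   failure: 0 1 0 1 2 3 4 5 2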
| 592 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : List[Any] = BlenderbotSmallConfig
_snake_case : Tuple = {}
_snake_case : str = """gelu"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int=13 , lowerCamelCase__ :str=7 , lowerCamelCase__ :Tuple=True , lowerCamelCase__ :int=False , lowerCamelCase__ :Optional[int]=99 , lowerCamelCase__ :List[str]=32 , lowerCamelCase__ :List[Any]=2 , lowerCamelCase__ :Dict=4 , lowerCamelCase__ :str=37 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=0.1 , lowerCamelCase__ :List[Any]=20 , lowerCamelCase__ :List[Any]=2 , lowerCamelCase__ :Tuple=1 , lowerCamelCase__ :Tuple=0 , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :List[str] = batch_size
UpperCamelCase__ :int = seq_length
UpperCamelCase__ :Any = is_training
UpperCamelCase__ :Union[str, Any] = use_labels
UpperCamelCase__ :str = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :int = num_hidden_layers
UpperCamelCase__ :Union[str, Any] = num_attention_heads
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :Union[str, Any] = hidden_dropout_prob
UpperCamelCase__ :Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[Any] = max_position_embeddings
UpperCamelCase__ :Optional[int] = eos_token_id
UpperCamelCase__ :Optional[int] = pad_token_id
UpperCamelCase__ :Optional[Any] = bos_token_id
def __a ( self :List[str] ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase__ :Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase__ :Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase__ :Dict = prepare_blenderbot_small_inputs_dict(__lowercase , __lowercase , __lowercase )
return config, inputs_dict
def __a ( self :Optional[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Optional[int] = TFBlenderbotSmallModel(config=__lowercase ).get_decoder()
UpperCamelCase__ :Tuple = inputs_dict["""input_ids"""]
UpperCamelCase__ :str = input_ids[:1, :]
UpperCamelCase__ :Union[str, Any] = inputs_dict["""attention_mask"""][:1, :]
UpperCamelCase__ :Union[str, Any] = inputs_dict["""head_mask"""]
UpperCamelCase__ :str = 1
# first forward pass
UpperCamelCase__ :str = model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
UpperCamelCase__ :Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
UpperCamelCase__ :int = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase__ :Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase__ :Tuple = model(__lowercase , attention_mask=__lowercase )[0]
UpperCamelCase__ :Any = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase__ :List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase__ :List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase__ :Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )
def A ( lowercase__ : Any , lowercase__ : List[str] , lowercase__ : int , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=None , lowercase__ : List[str]=None , lowercase__ : Optional[int]=None , lowercase__ : Tuple=None , ) -> int:
if attention_mask is None:
UpperCamelCase__ :Dict = tf.cast(tf.math.not_equal(_A , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase__ :Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase__ :str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase__ :List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCamelCase__ :Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
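# Editor's note: dependency-free illustration of the padding-mask rule applied above --
# positions equal to pad_token_id get mask 0, everything else gets 1. The pad id and the
# token values here are arbitrary, chosen only for the illustration.
pad_token_id = 0
input_ids = [5, 7, 9, 0, 0]
attention_mask = [int(tok != pad_token_id) for tok in input_ids]
assert attention_mask == [1, 1, 1, 0, 0]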
@require_tf
class lowerCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_snake_case : Dict = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_snake_case : Union[str, Any] = True
_snake_case : Tuple = False
_snake_case : Optional[int] = False
def __a ( self :Optional[int] ):
UpperCamelCase__ :str = TFBlenderbotSmallModelTester(self )
UpperCamelCase__ :List[Any] = ConfigTester(self , config_class=__lowercase )
def __a ( self :Tuple ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
@require_tokenizers
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i\'m going to throw up.\nand why is that?"""
]
_snake_case : Union[str, Any] = """facebook/blenderbot_small-90M"""
@cached_property
def __a ( self :Union[str, Any] ):
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.tokenizer(self.src_text , return_tensors="""tf""" )
UpperCamelCase__ :List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowercase , )
UpperCamelCase__ :int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 45 |
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 0
while number > 0:
snake_case_ = number % 10
sum_of_digits += last_digit
snake_case_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase__ ( _A = 100 ):
'''simple docstring'''
snake_case_ = factorial(_A )
snake_case_ = split_and_add(_A )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
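# Editor's note: independent check of the digit-sum-of-factorial logic above. 10! is
# 3628800, whose digits sum to 27; for 100! the sum is 648 (Project Euler problem 20).
from math import factorial
assert sum(int(d) for d in str(factorial(10))) == 27
assert sum(int(d) for d in str(factorial(100))) == 648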
| 376 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class snake_case (UpperCamelCase ):
lowerCAmelCase__ :List[str] = "vivit"
def __init__( self ,UpperCAmelCase_=224 ,UpperCAmelCase_=32 ,UpperCAmelCase_=[2, 16, 16] ,UpperCAmelCase_=3 ,UpperCAmelCase_=768 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=3_072 ,UpperCAmelCase_="gelu_fast" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-0_6 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,) -> Any:
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = num_frames
lowercase__ = tubelet_size
lowercase__ = num_channels
lowercase__ = qkv_bias
super().__init__(**UpperCAmelCase_ )
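# Editor's note: a hedged usage sketch. In the upstream transformers library the class
# above corresponds to VivitConfig; constructing it just stores these hyperparameters and
# forwards the rest to PretrainedConfig (assumes a transformers release that ships ViViT).
from transformers import VivitConfig
config = VivitConfig(image_size=224, num_frames=32)
assert config.model_type == "vivit"
assert config.hidden_size == 768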
| 707 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case (unittest.TestCase ):
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=100 ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=5 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,) -> List[Any]:
lowercase__ = parent
lowercase__ = vocab_size
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def _a ( self ) -> List[str]:
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Optional[int]:
lowercase__ = FlaxBeitModel(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Tuple:
lowercase__ = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = self.type_sequence_label_size
lowercase__ = FlaxBeitForImageClassification(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = FlaxBeitForImageClassification(UpperCAmelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCAmelCase_ )
def _a ( self ) -> int:
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Optional[int] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _a ( self ) -> None:
lowercase__ = FlaxBeitModelTester(self )
lowercase__ = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ,hidden_size=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[Any]:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
lowercase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def _a ( self ) -> int:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ )
lowercase__ = model_class(UpperCAmelCase_ )
@jax.jit
def model_jitted(UpperCAmelCase_ ,**UpperCAmelCase_ ):
return model(pixel_values=UpperCAmelCase_ ,**UpperCAmelCase_ )
with self.subTest("JIT Enabled" ):
lowercase__ = model_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ = model_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) ,len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
def _a ( self ) -> List[Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _a ( self ) -> Union[str, Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def _a ( self ) -> str:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def _a ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class snake_case (unittest.TestCase ):
@cached_property
def _a ( self ) -> Optional[int]:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
lowercase__ = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="np" ).pixel_values
# prepare bool_masked_pos
lowercase__ = np.ones((1, 196) ,dtype=UpperCAmelCase_ )
# forward pass
lowercase__ = model(pixel_values=UpperCAmelCase_ ,bool_masked_pos=UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = (1, 196, 8_192)
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,UpperCAmelCase_ ,atol=1E-2 ) )
@slow
def _a ( self ) -> List[str]:
lowercase__ = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="np" )
# forward pass
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = (1, 1_000)
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 281
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
@slow
def _a ( self ) -> Optional[int]:
lowercase__ = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="np" )
# forward pass
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = (1, 21_841)
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 2_396
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
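# Editor's note: the (1, 196) bool_masked_pos shape in the masked-image-modeling test
# above follows from the patch arithmetic -- a 224x224 image with 16x16 patches gives
# 14 * 14 = 196 patches. Quick check, independent of the models:
assert (224 // 16) ** 2 == 196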
| 539 | 0 |