| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.int8,
'tensor(uint8)': np.uint8,
'tensor(int16)': np.int16,
'tensor(uint16)': np.uint16,
'tensor(int32)': np.int32,
'tensor(uint32)': np.uint32,
'tensor(int64)': np.int64,
'tensor(uint64)': np.uint64,
'tensor(float16)': np.float16,
'tensor(float)': np.float32,
'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
"""simple docstring"""
def __init__( self , model=None , **kwargs ):
"""simple docstring"""
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
self.model = model
self.model_save_dir = kwargs.get('model_save_dir' , None )
self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )
def __call__( self , **kwargs ):
"""simple docstring"""
inputs = {k: np.array(v ) for k, v in kwargs.items()}
return self.model.run(None , inputs )
@staticmethod
def load_model( path , provider=None , sess_options=None ):
"""simple docstring"""
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
provider = 'CPUExecutionProvider'
return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
def _save_pretrained( self , save_directory , file_name = None , **kwargs ):
"""simple docstring"""
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
src_path = self.model_save_dir.joinpath(self.latest_model_name )
dst_path = Path(save_directory ).joinpath(model_file_name )
try:
shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
if src_path.exists():
dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
try:
shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
def save_pretrained( self , save_directory , **kwargs ):
"""simple docstring"""
if os.path.isfile(save_directory ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(save_directory , exist_ok=True )
# saving model weights/files
self._save_pretrained(save_directory , **kwargs )
@classmethod
def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ):
"""simple docstring"""
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(model_id ):
model = OnnxRuntimeModel.load_model(
os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
kwargs['model_save_dir'] = Path(model_id )
# load model from hub
else:
# download model
model_cache_path = hf_hub_download(
repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
kwargs['model_save_dir'] = Path(model_cache_path ).parent
kwargs['latest_model_name'] = Path(model_cache_path ).name
model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
return cls(model=model , **kwargs )
@classmethod
def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ):
"""simple docstring"""
revision = None
if len(str(model_id ).split('@' ) ) == 2:
model_id , revision = model_id.split('@' )
return cls._from_pretrained(
model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
| 101 |
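The sample above wraps an `onnxruntime.InferenceSession` and keeps a map from ONNX Runtime's `tensor(...)` type strings to numpy dtypes. A minimal sketch of that pattern, assuming only that `onnxruntime` and `numpy` are installed; the model path is a placeholder:

import numpy as np
import onnxruntime as ort

ORT_TO_NP_TYPE = {'tensor(float)': np.float32, 'tensor(int64)': np.int64}

def describe_outputs(model_path, provider='CPUExecutionProvider'):
    # Every session output reports an ONNX type string such as 'tensor(float)'.
    session = ort.InferenceSession(model_path, providers=[provider])
    return {out.name: ORT_TO_NP_TYPE.get(out.type) for out in session.get_outputs()}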
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def find_backend( line :Optional[int] ) -> Any:
if _re_test_backend.search(line ) is None:
return None
backends = [b[0] for b in _re_backend.findall(line )]
backends.sort()
return "_and_".join(backends )
def parse_init( _snake_case :Any ) -> Any:
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_A = f.readlines()
_A = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
_A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
_A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_A = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_A = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
_A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
_A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects :List[str] , type_hint_objects :Dict ) -> Any:
def find_duplicates(_snake_case :Any ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def check_all_inits( ) -> int:
_A = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
_A = os.path.join(_snake_case , '''__init__.py''' )
_A = parse_init(_snake_case )
if objects is not None:
_A = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def get_transformers_submodules( ) -> Optional[int]:
_A = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
_A = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
_A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_A = direct_transformers_import(_snake_case )
_A = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-)add them.
with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
_A = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) )
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_snake_case ) > 0:
_A = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 2 | 0 |
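The checker above leans on a pair of regexes to turn `if not is_xxx_available()` guard lines into backend names. A self-contained demo of that step; the regexes here are simplified copies, local to the demo:

import re

_re_backend = re.compile(r'is\_([a-z_]*)_available')
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')

def find_backend(line):
    # Only guard lines of the form `if not is_xxx_available()...` qualify.
    if _re_test_backend.search(line) is None:
        return None
    return '_and_'.join(sorted(_re_backend.findall(line)))

print(find_backend('if not is_torch_available() and not is_tf_available():'))
# -> 'tf_and_torch'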
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : Tuple = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
__lowerCAmelCase : List[str] = """resnet"""
__lowerCAmelCase : List[Any] = ["""basic""", """bottleneck"""]
def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = downsample_in_first_stage
self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
self._out_features , self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-3
| 102 |
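The configuration class in the previous cell is published in `transformers` as `ResNetConfig`; a short usage sketch, assuming a released `transformers` install:

from transformers import ResNetConfig

config = ResNetConfig(
    embedding_size=64,
    hidden_sizes=[256, 512, 1024, 2048],
    depths=[3, 4, 6, 3],
    layer_type='bottleneck',  # must be one of ['basic', 'bottleneck']
)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']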
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
"""simple docstring"""
def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]:
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int:
_A = {}
_A = {}
if prompt is not None:
_A = prompt
if generate_kwargs is not None:
_A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
_A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int:
_A = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
_A = self.model.config.model_type
if model_type == "git":
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
_A = [self.tokenizer.cls_token_id] + input_ids
_A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
_A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_A = None
return model_inputs
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
_A = None
if generate_kwargs is None:
_A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_A = model_inputs.pop(self.model.main_input_name )
_A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
return model_outputs
def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = []
for output_ids in model_outputs:
_A = {
'''generated_text''': self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
| 2 | 0 |
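The class above backs the public `image-to-text` pipeline task. A hedged usage sketch; the checkpoint and image URL are illustrative, and a vision backend (PIL plus torch or TF) must be installed:

from transformers import pipeline

captioner = pipeline('image-to-text', model='Salesforce/blip-image-captioning-base')
print(captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png'))
# -> [{'generated_text': '...'}]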
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
snake_case = logging.get_logger(__name__)
class MarkupLMFeatureExtractor( FeatureExtractionMixin ):
def __init__( self : Tuple , **__lowerCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : int ):
"""simple docstring"""
_snake_case = []
_snake_case = []
_snake_case = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_snake_case = parent.find_all(child.name , recursive=__lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
_snake_case = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
_snake_case = BeautifulSoup(__lowerCamelCase , '''html.parser''' )
_snake_case = []
_snake_case = []
_snake_case = []
for element in html_code.descendants:
if type(__lowerCamelCase ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
_snake_case = html.unescape(__lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowerCamelCase )
_snake_case , _snake_case = self.xpath_soup(__lowerCamelCase )
stringaxtag_seq.append(__lowerCamelCase )
stringaxsubs_seq.append(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = ''''''
for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self : Optional[Any] , __lowerCamelCase : Tuple ):
"""simple docstring"""
_snake_case = False
# Check that strings has a valid type
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = True
elif isinstance(__lowerCamelCase , (list, tuple) ):
if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
_snake_case = True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
f"""but is of type {type(__lowerCamelCase )}.""" )
_snake_case = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
if not is_batched:
_snake_case = [html_strings]
# Get nodes + xpaths
_snake_case = []
_snake_case = []
for html_string in html_strings:
_snake_case , _snake_case , _snake_case = self.get_three_from_single(__lowerCamelCase )
nodes.append(__lowerCamelCase )
_snake_case = []
for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_snake_case = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
xpath_strings.append(__lowerCamelCase )
xpaths.append(__lowerCamelCase )
# return as Dict
_snake_case = {'''nodes''': nodes, '''xpaths''': xpaths}
_snake_case = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
| 103 |
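One step of the extractor above, the xpath construction from parallel tag/subscript lists, is easy to exercise standalone; a pure-Python sketch with no bs4 dependency:

def construct_xpath(xpath_tags, xpath_subscripts):
    xpath = ''
    for tagname, subs in zip(xpath_tags, xpath_subscripts):
        xpath += f'/{tagname}'
        if subs != 0:
            xpath += f'[{subs}]'  # subscript 0 means the tag is unambiguous
    return xpath

print(construct_xpath(['html', 'body', 'div', 'p'], [0, 0, 2, 1]))
# -> /html/body/div[2]/p[1]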
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
_A = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_A = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
_A = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 0 |
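The scraper above keys off a Yahoo-specific CSS class, which breaks whenever the page markup changes. The underlying bs4 lookup pattern on static HTML, as a stable illustration:

from bs4 import BeautifulSoup

html = '<div class="price"><span>123.45</span></div>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.find('div', class_='price').find('span').text)  # 123.45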
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCamelCase = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
UpperCamelCase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
UpperCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message : str ) -> dict[str, int]:
"""simple docstring"""
letter_count = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def get_item_at_index_zero( x : tuple ) -> str:
"""simple docstring"""
return x[0]
def get_frequency_order( message : str ) -> str:
"""simple docstring"""
letter_to_freq = get_letter_count(message )
freq_to_letter : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(letter )
freq_to_letter_str : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
freq_pairs = list(freq_to_letter_str.items() )
freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
freq_order = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(freq_order )
def english_freq_match_score( message : str ) -> int:
"""simple docstring"""
freq_order = get_frequency_order(message )
match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
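A compact standalone demo of the frequency-ordering idea above. It simplifies the sample's tie-breaking rule (a stable sort keeps ETAOIN order among equally frequent letters); names are chosen for the demo:

import string
from collections import Counter

ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'

def frequency_order(message):
    counts = Counter(ch for ch in message.upper() if ch in string.ascii_uppercase)
    return ''.join(sorted(ETAOIN, key=lambda ch: -counts[ch]))

print(frequency_order('Defend the east wall of the castle'))  # most frequent letters first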
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal( ) -> None:
num_nodes = 9
edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
result = kruskal(num_nodes , edges )
expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(result ) == sorted(expected )
| 2 | 0 |
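The test above imports `kruskal` from elsewhere in the repository. A minimal union-find implementation matching its `(num_nodes, [[u, v, w], ...])` interface; a sketch, not necessarily the repository's exact version:

def kruskal(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst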
from __future__ import annotations
import math
def is_prime( number : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums( n : int ) -> list[int]:
"""simple docstring"""
if not isinstance(n , int ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
list_nums = []
for num in range(len(odd_composites ) ):
i = 0
while 2 * i * i <= odd_composites[num]:
rem = odd_composites[num] - 2 * i * i
if is_prime(rem ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(list_nums ) == n:
return list_nums
return []
def solution( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 105 |
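A standalone sanity check of the 6k +/- 1 primality test the solution above relies on (copied inline so it runs on its own):

import math

def is_prime(number):
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # Any remaining factor must be of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]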
def sum_of_divisors( input_num : int ) -> int:
if not isinstance(input_num , int ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
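Usage sketch for the proper-divisor sum above: perfect numbers are exactly its fixed points.

def sum_of_divisors(num):
    return sum(d for d in range(1, num // 2 + 1) if num % d == 0)

assert sum_of_divisors(6) == 6    # 1 + 2 + 3
assert sum_of_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14
assert sum_of_divisors(12) == 16  # abundant: exceeds the number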
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
Blip2Config,
Blip2ForConditionalGeneration,
Blip2Processor,
Blip2VisionConfig,
BlipImageProcessor,
OPTConfig,
T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
'''simple docstring'''
A = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' )
return image
def create_rename_keys( config ):
'''simple docstring'''
rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def read_in_q_v_bias( state_dict , config ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blip2_config( model_name , eos_token_id ):
'''simple docstring'''
image_size = 364 if 'coco' in model_name else 224
vision_config = Blip2VisionConfig(image_size=image_size ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
elif "opt-6.7b" in model_name:
text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
elif "t5-xl" in model_name:
text_config = T5Config.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
text_config = T5Config.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
config = Blip2Config(vision_config=vision_config , text_config=text_config )
return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
tokenizer = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
config , image_size = get_blip2_config(model_name , eos_token_id=eos_token_id )
hf_model = Blip2ForConditionalGeneration(config ).eval()
model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
name , type = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lavis_device = 'cuda' if torch.cuda.is_available() else 'cpu'
original_model , vis_processors , _ = load_model_and_preprocess(
name=name , model_type=type , is_eval=True , device=lavis_device )
original_model.eval()
print('Done!' )
# update state dict keys
state_dict = original_model.state_dict()
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
A = state_dict.pop(lowerCAmelCase__ )
if key.startswith('Qformer.bert' ):
A = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
A = key.replace('self' , 'attention' )
if "opt_proj" in key:
A = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
A = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
A = key.replace('opt' , 'language' )
if key.startswith('t5' ):
A = key.replace('t5' , 'language' )
A = val
# read in qv biases
read_in_q_v_bias(state_dict , config )
A , A = hf_model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
A = load_demo_image()
A = vis_processors['eval'](lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
A = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ )
# create processor
image_processor = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
processor = Blip2Processor(image_processor=image_processor , tokenizer=tokenizer )
A = processor(images=lowerCAmelCase__ , return_tensors='pt' ).pixel_values.to(lowerCAmelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
hf_model.to(lowerCAmelCase__ )
with torch.no_grad():
if "opt" in model_name:
A = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
A = hf_model(lowerCAmelCase__ , lowerCAmelCase__ ).logits
else:
A = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
A = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
A = hf_model(lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
A = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowerCAmelCase__ )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
A = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowerCAmelCase__ )
else:
# cast to same type
A = logits.dtype
assert torch.allclose(original_logits.to(lowerCAmelCase__ ) , lowerCAmelCase__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
A = ''
A = tokenizer(lowerCAmelCase__ , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ )
A = original_model.generate({'image': original_pixel_values} )
A = hf_model.generate(
lowerCAmelCase__ , lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCAmelCase__ )
A = input_ids.shape[1]
A = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase__ )
A = [text.strip() for text in output_text]
print('HF generation:' , lowerCAmelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCAmelCase__ )
hf_model.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case :str =argparse.ArgumentParser()
__snake_case :Optional[Any] =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__snake_case :List[Any] =parser.parse_args()
convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 106 |
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_3
def rabin_karp( pattern : str , text : str ) -> bool:
p_len = len(pattern )
t_len = len(text )
if p_len > t_len:
return False
p_hash = 0
text_hash = 0
modulus_power = 1
# Calculating the hash of pattern and substring of text
for i in range(p_len ):
p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
modulus_power = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
text_hash = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def test_rabin_karp( ) -> None:
pattern = '''abc1abc12'''
text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
text2 = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
# Test 2)
pattern = '''ABABX'''
text = '''ABABZABABYABABX'''
assert rabin_karp(pattern , text )
# Test 3)
pattern = '''AAAB'''
text = '''ABAAAAAB'''
assert rabin_karp(pattern , text )
# Test 4)
pattern = '''abcdabcy'''
text = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(pattern , text )
# Test 5)
pattern = '''Lü'''
text = '''Lüsai'''
assert rabin_karp(pattern , text )
pattern = '''Lue'''
assert not rabin_karp(pattern , text )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 2 | 0 |
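A worked example of the rolling-hash update at the core of the Rabin-Karp matcher above: drop the leading character's contribution, shift by the alphabet size, add the incoming character. Constants mirror the sample:

ALPHABET_SIZE, MODULUS = 256, 1_000_003

def window_hashes(text, p_len):
    # Yields the hash of every length-p_len window of `text` in O(len(text)).
    h = 0
    for ch in text[:p_len]:
        h = (ord(ch) + h * ALPHABET_SIZE) % MODULUS
    yield h
    power = pow(ALPHABET_SIZE, p_len - 1, MODULUS)
    for i in range(len(text) - p_len):
        h = ((h - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + p_len])) % MODULUS
        yield h

target = next(window_hashes('abc', 3))
print([i for i, h in enumerate(window_hashes('xabcx', 3)) if h == target])  # [1]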
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = KandinskyV22PriorPipeline
params = ["prompt"]
batch_params = ["prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return 32
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
return 32
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
return self.time_input_dim
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return 1_00
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
_A = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
_A = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean are initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_A = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __UpperCAmelCase ( self : Dict ) -> Tuple:
torch.manual_seed(0 )
_A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=2_24, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
_A = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
_A = CLIPImageProcessor(
crop_size=2_24, do_center_crop=UpperCamelCase__, do_normalize=UpperCamelCase__, do_resize=UpperCamelCase__, image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711], resample=3, size=2_24, )
return image_processor
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
_A = self.dummy_prior
_A = self.dummy_image_encoder
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_image_processor
_A = UnCLIPScheduler(
variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=10_00, clip_sample=UpperCamelCase__, clip_sample_range=10.0, )
_A = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple=0 ) -> Optional[Any]:
if str(UpperCamelCase__ ).startswith('mps' ):
_A = torch.manual_seed(UpperCamelCase__ )
else:
_A = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_A = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCAmelCase ( self : Dict ) -> str:
_A = 'cpu'
_A = self.get_dummy_components()
_A = self.pipeline_class(**UpperCamelCase__ )
_A = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_A = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
_A = output.image_embeds
_A = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ), return_dict=UpperCamelCase__, )[0]
_A = image[0, -10:]
_A = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_A = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
_A = torch_device == 'cpu'
_A = True
_A = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__, relax_max_difference=UpperCamelCase__, test_mean_pixel_difference=UpperCamelCase__, )
@skip_mps
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_A = torch_device == 'cpu'
_A = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__, test_mean_pixel_difference=UpperCamelCase__, )
| 107 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def get_pairs( word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer( PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
_A = do_lower_case
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(__lowerCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_A = {}
@property
def snake_case_ ( self : List[str] ) -> int:
return len(self.decoder )
def snake_case_ ( self : Dict ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__lowerCAmelCase ):
try:
_A = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__lowerCAmelCase )
_A = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_A = get_pairs(__lowerCAmelCase )
_A = ''' '''.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
_A = word.replace(__lowerCAmelCase , '''''' )
_A = word.replace(''' ''' , __lowerCAmelCase )
_A = word
return word
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
_A = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
_A = ''' '''.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
_A = ''''''.join(string.split(__lowerCAmelCase ) )
return string
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_A = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 2 | 0 |
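A compact demo of the merge loop at the heart of the tokenizer's bpe method above: repeatedly fuse the lowest-ranked adjacent pair until none remain. The two-entry merge table is a toy; real checkpoints ship a merges.txt with thousands of rows:

def bpe(token, bpe_ranks):
    word = tuple(token)
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [p for p in pairs if p in bpe_ranks]
        if not candidates:
            break
        first, second = min(candidates, key=bpe_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)  # fuse the ranked pair
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return ' '.join(word)

print(bpe('low', {('l', 'o'): 0, ('lo', 'w'): 1}))  # -> 'low' after two merges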
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase : int = 1 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : int = 50 , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
_UpperCAmelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(lowerCamelCase , lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image ), "This is a local test"
| 108 |
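Driving an unconditional image pipeline like the (locally modified) one above usually goes through `from_pretrained`; a sketch with an illustrative checkpoint, assuming `diffusers` and `torch` are installed:

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save('sample.png')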
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 2
class MinPriorityQueue( Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
self.heap : list[tuple[T, int]] = []
self.position_map : dict[T, int] = {}
self.elements : int = 0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def is_empty( self : str ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def push( self : Optional[int] , elem : T , weight : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
self.position_map[elem] = self.elements
self.elements += 1
self._bubble_up(elem )
def extract_min( self : Tuple ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
elem , _ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
bubble_down_elem , _ = self.heap[0]
self._bubble_down(bubble_down_elem )
return elem
def update_key( self : int , elem : T , weight : int ) -> None:
# Update the weight of the given key
position = self.position_map[elem]
self.heap[position] = (elem, weight)
if position > 0:
parent_position = get_parent_position(position )
_ , parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(elem )
else:
self._bubble_down(elem )
else:
self._bubble_down(elem )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_A = self.position_map[elem]
if curr_pos == 0:
return None
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[curr_pos]
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_up(__lowerCAmelCase )
return None
def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_A = self.position_map[elem]
_A , _A = self.heap[curr_pos]
_A = get_child_left_position(__lowerCAmelCase )
_A = get_child_right_position(__lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
_A , _A = self.heap[child_left_position]
_A , _A = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
if child_left_position < self.elements:
_A , _A = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
_A , _A = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
return None
def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
# Swap the nodes at the given positions
_A = self.heap[nodea_pos][0]
_A = self.heap[nodea_pos][0]
_A , _A = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_A = nodea_pos
_A = nodea_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected weighted graph."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    # Prim's algorithm over the graph, driven by the min priority queue above
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
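# Minimal usage sketch (illustrative; this tiny triangle graph is an
# assumption, not part of the original module). The parent pointers spell
# out the spanning tree edges a-b and b-c.
if __name__ == "__main__":
    example_graph = GraphUndirectedWeighted[str]()
    example_graph.add_edge("a", "b", 1)
    example_graph.add_edge("b", "c", 2)
    example_graph.add_edge("a", "c", 4)
    example_dist, example_parent = prims_algo(example_graph)
    print(example_dist)    # e.g. {'a': 0, 'b': 1, 'c': 3}
    print(example_parent)  # e.g. {'a': None, 'b': 'a', 'c': 'b'}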
| 2 | 0 |
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between one prediction and one ground-truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy over all images."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 109 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
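# A minimal usage sketch (illustrative; assumes the "vinai/bartpho-syllable"
# checkpoint referenced above can be downloaded in your environment):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))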
| 2 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the BEiT structure."""
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
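# Example invocation (a sketch; the script filename and output directory are
# assumptions, not taken from the original sources):
#
#     python convert_dit_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base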
| 110 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns every vertex reachable from `start`."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
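    # Expected output for the graph above (set ordering may vary between runs):
    # {'A', 'B', 'C', 'D', 'E', 'F', 'G'}, since every vertex is reachable from "A".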
| 2 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
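# A minimal usage sketch (illustrative only): build a backbone-style config
# that exposes the last two stages as feature maps.
#
#     config = FocalNetConfig(out_features=["stage3", "stage4"])
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']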
| 250 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 255 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
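# A minimal usage sketch (illustrative; the small sizes are arbitrary, chosen
# so that d_model % n_head == 0):
#
#     config = XLNetConfig(vocab_size=1000, d_model=64, n_layer=2, n_head=4)
#     print(config.d_head)  # 16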
| 2 | 0 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")

    if input_num <= 0:
        raise ValueError("Input must be positive")

    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
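# A small worked example (illustrative): the proper divisors of 28 are
# 1, 2, 4, 7 and 14, which sum back to 28, making it a perfect number.
#
#     >>> sum_of_divisors(28)
#     28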
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 |
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit, uppercase hexadecimal representation
    # and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
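# A quick round-trip example (illustrative):
#
#     >>> base16_encode(b"Hello")
#     '48656C6C6F'
#     >>> base16_decode("48656C6C6F")
#     b'Hello'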
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the RoBERTa-PreLayerNorm checkpoint weights to our structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 504 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
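# A small worked example (illustrative): [2, 4, 6] has a common difference
# of 2, so it is an arithmetic series, and its mean is (2 + 4 + 6) / 3 = 4.0.
#
#     >>> is_arithmetic_series([2, 4, 6])
#     True
#     >>> arithmetic_mean([2, 4, 6])
#     4.0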
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( __UpperCamelCase ):
for param in module.parameters():
__lowercase : str = False
def __UpperCAmelCase ( ):
__lowercase : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__lowercase : str = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Any = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def __UpperCAmelCase ( ):
__lowercase : int = datetime.now()
__lowercase : Optional[int] = current_time.strftime('''%H:%M:%S''' )
return timestamp
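# A minimal usage sketch (illustrative; the Linear layer is an arbitrary
# stand-in for a real model):
#
#     layer = torch.nn.Linear(4, 4)
#     freeze_params(layer)
#     assert all(not p.requires_grad for p in layer.parameters())
#     print(get_device(), get_timestamp())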
| 76 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
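# Note on the expected output (illustrative): starting from |000>, the QFT
# yields a uniform superposition, so the 10_000 shots should spread roughly
# evenly over all 2**3 = 8 measurement outcomes (about 1250 counts each).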
| 2 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about deprecated arguments/attributes and return their popped values."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
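# Hedged usage sketch (my own addition): how a caller might retire a keyword
# argument via `deprecate`. The function name and the "scale_factor" kwarg are
# hypothetical; the relative `from .. import __version__` inside `deprecate` means
# this only runs with the module installed as part of its package.
def scale_latents(latents, scale=None, **kwargs):
    # Pops `scale_factor` from kwargs (warning once) and falls back to `scale`.
    popped = deprecate("scale_factor", "1.0.0", "Use `scale` instead.", take_from=kwargs)
    scale = popped if popped is not None else scale
    return latents * (scale if scale is not None else 1.0)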
| 68 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
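# Illustration (my own addition): how the "*" wildcard in MAPPING is filled in for a
# concrete fairseq parameter name; this mirrors the logic in recursively_load_weights.
def _demo_wildcard_mapping():
    name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", MAPPING["self_attn.k_proj"]
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"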
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
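# Example invocation (my own sketch; the script filename and paths are placeholders,
# the flags are exactly the ones defined by the argparse block above):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict \
#       --pytorch_dump_folder_path ./sew-converted \
#       --is_finetuned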
| 2 | 0 |
"""Project Euler problem 92: count the square-digit chains below ten million that arrive at 89."""
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by processing five digits at a time via the lookup table.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 chains: one ends in 89 (with 58 being the member that, when seeded
# first, minimises the iterations needed to classify the rest), and the other ends
# in 1 and contains only the number 1 itself. So 58 and 1 are seeded at the start.
# A dictionary was changed to an array to speed up the solution.
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the chain starting at `number` ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` produce chains arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
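# Cross-check (my own addition): brute-force the chain directly for small numbers
# and compare against the memoised `chain` above. Every chain ends at exactly one
# of 1 or 89, so the two predicates must always disagree.
def ends_in_89(number: int) -> bool:
    while number not in (1, 89):
        number = sum(int(c) ** 2 for c in str(number))
    return number == 89


if __name__ == "__main__":
    assert all(chain(n) != ends_in_89(n) for n in range(1, 1000))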
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
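# Hedged usage sketch (my own addition, separate from the test harness above): the
# same zero-shot image-classification pipeline, called directly. The image path is
# the test fixture used above; the CLIP checkpoint downloads on first run.
if __name__ == "__main__":
    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    print(classifier(image, candidate_labels=["cat", "plane", "remote"]))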
| 2 | 0 |
"""Dutch national flag sort: an in-place, single-pass three-way partition."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
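# Property check (my own addition): the single-pass partition must agree with the
# built-in sort on any sequence drawn from the three colors.
if __name__ == "__main__":
    import random

    sample = [random.choice(colors) for _ in range(100)]
    assert dutch_national_flag_sort(list(sample)) == sorted(sample)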
| 229 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 2 | 0 |
"""Small helpers for reading configuration from environment variables."""
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative int found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
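# Usage sketch (my own addition; the environment variable names are illustrative):
if __name__ == "__main__":
    world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
    debug = parse_flag_from_env("MY_APP_DEBUG", default=False)
    backend = parse_choice_from_env("MY_APP_BACKEND", default="no")
    print(world_size, debug, backend)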
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for `openai-gpt` models; the defaults mirror the original GPT release."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
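# Demo sketch (my own addition; runnable only when this module is imported as part
# of transformers, since it uses relative imports): `attribute_map` lets canonical
# names alias the GPT-style ones.
#
#   config = OpenAIGPTConfig(n_embd=1024, n_layer=6)
#   assert config.hidden_size == 1024      # resolves to n_embd via attribute_map
#   assert config.num_hidden_layers == 6   # resolves to n_layer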
| 2 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn lists of (story, cont1, cont2, label) tuples into padded tensors of
    shape (n_batch, 2, input_len), one row per candidate continuation."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
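# Shape sketch (my own addition): one story with two candidate continuations; axis 1
# of each tensor indexes the two (story + continuation) sequences, padded to
# input_len with 0 for inputs and -100 for the LM labels. The token ids are toys.
if __name__ == "__main__":
    toy = [[([5, 6], [7], [8], 1)]]  # one example: story, cont1, cont2, label
    input_ids, mc_token_ids, lm_labels, mc_labels = pre_process_datasets(
        toy, 7, 2, 1, 2, 3  # input_len, cap_length, start/delimiter/clf token ids
    )[0]
    print(input_ids.shape, mc_token_ids.tolist(), mc_labels.tolist())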
def main():
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=_snake_case , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=_snake_case , type=_snake_case , required=_snake_case , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=_snake_case , default="""""" )
parser.add_argument("""--eval_dataset""" , type=_snake_case , default="""""" )
parser.add_argument("""--seed""" , type=_snake_case , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=_snake_case , default=3 )
parser.add_argument("""--train_batch_size""" , type=_snake_case , default=8 )
parser.add_argument("""--eval_batch_size""" , type=_snake_case , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=_snake_case , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=_snake_case , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=_snake_case , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_snake_case , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=_snake_case , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=_snake_case , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=_snake_case , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=_snake_case , default=0.01 )
parser.add_argument("""--lm_coef""" , type=_snake_case , default=0.9 )
parser.add_argument("""--n_valid""" , type=_snake_case , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=_snake_case , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=_snake_case , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (string, int, or list thereof)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 176 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 2 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        'module.fc.fc1.weight',
        'module.fc.fc1.bias',
        'module.fc.bn1.weight',
        'module.fc.bn1.bias',
        'module.fc.bn1.running_mean',
        'module.fc.bn1.running_var',
        'module.fc.bn1.num_batches_tracked',
        'module.fc.fc2.weight',
        'module.fc.fc2.bias',
        'module.fc.bn2.weight',
        'module.fc.bn2.bias',
        'module.fc.bn2.running_mean',
        'module.fc.bn2.running_var',
        'module.fc.bn2.num_batches_tracked',
        'module.fc.fc3.weight',
        'module.fc.fc3.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1_000

    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['target_encoder']

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]])
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
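    # Added usage sketch (the script and output-directory names are assumptions,
    # not taken from the original file):
    #
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small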
| 621 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence: list ) -> list:
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
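    # Added example (a sketch): a fixed demo input alongside the interactive one;
    # the expected output is [0, 0, 1, 1, 2, 2].
    print(dutch_national_flag_sort([2, 0, 1, 2, 0, 1]))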
| 2 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )

    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer( self ):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_special_tokens_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text] , padding=False , truncation=True )["input_ids"]
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text )["input_ids"]
        encoded_dot = tok(src_text_dot )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
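# Added example (a sketch, not part of the original test file; downloads the
# public "facebook/blenderbot-90M" vocabulary on first use):
def _demo_blenderbot_small_tokenizer():
    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    print(tok.tokenize("I am a small frog."))  # lowercased, whitespace-split BPE pieces
    print(tok("I am a small frog.").input_ids)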
| 250 |
import itertools
import math
def is_prime( number: int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth: int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(f'{solution() = }')
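    # Added example (a sketch): the generator is lazy, so peeking at the first
    # few primes is cheap.
    print(list(itertools.islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]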
| 2 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , '''rb''' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contains the following fields: {str(list(dataset.keys() ) )}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
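# Added example (a sketch, not part of the original module; "data.jsonl" is a
# placeholder path): this builder is what backs `load_dataset("json", ...)`.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("json", data_files="data.jsonl", split="train")
    print(ds[0])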
| 255 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def find_backend( line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( init_file ):
    with open(init_file , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r'''\[([^\]]+)\]''' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    def find_duplicates( seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , '''r''' ) as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
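    # Added illustration (a sketch, not from the original script): the lazy-init
    # shape this checker validates. Both halves must expose the same objects:
    #
    #   _import_structure = {"module_a": ["ObjectA"]}
    #   if TYPE_CHECKING:
    #       from .module_a import ObjectA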
| 2 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 80 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['''prompt'''] = prompt
        if generate_kwargs is not None:
            forward_kwargs['''generate_kwargs'''] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['''generate_kwargs'''] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            forward_kwargs['''generate_kwargs''']['''max_new_tokens'''] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )

    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['''input_ids'''] = None
        return model_inputs

    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , list )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            model_inputs['''input_ids'''] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs

    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
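# Added example (a sketch; the checkpoint name is illustrative and weights are
# downloaded on first use):
def _demo_image_to_text():
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))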
| 2 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')
class Node(Generic[T] ):
    def __init__( self , data: T ):
        self.data = data
        self.next: Node[T] | None = None

    def __str__( self ) -> str:
        return F'''{self.data}'''
class LinkedStack(Generic[T] ):
    def __init__( self ) -> None:
        self.top: Node[T] | None = None

    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ) -> str:
        return "->".join([str(item ) for item in self] )

    def __len__( self ) -> int:
        return len(tuple(iter(self ) ) )

    def is_empty( self ) -> bool:
        return self.top is None

    def push( self , item: T ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ) -> T:
        if self.is_empty():
            raise IndexError("""pop from empty stack""" )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = pop_node.next
        return pop_node.data

    def peek( self ) -> T:
        if self.is_empty():
            raise IndexError("""peek from empty stack""" )
        assert self.top is not None
        return self.top.data

    def clear( self ) -> None:
        self.top = None
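# Added example (a sketch, not part of the original module): LIFO behavior of
# the linked stack above.
def _demo_linked_stack() -> None:
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    print(stack)        # 2->1
    print(stack.pop())  # 2
    print(len(stack))   # 1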
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 504 |
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
_A = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_A = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
_A = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    _statements = [
        '''CUDA out of memory.''',  # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''',  # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''',  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size(function = None , starting_batch_size = 1_28 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )

    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
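# Added example (a sketch, assuming the decorator above): the wrapped function
# is retried with a halved batch size whenever an out-of-memory error is
# raised. The body is a placeholder, not part of this module.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    print(f"Attempting training with batch_size={batch_size}")
    # ... build the dataloader and run the training step with `batch_size` ...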
| 76 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected ) == sorted(result )
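# Added example (a sketch): `kruskal` takes (num_nodes, [[u, v, weight], ...])
# and returns the minimum spanning tree edges in the same triplet format.
def _demo_kruskal() -> None:
    print(kruskal(3, [[0, 1, 1], [1, 2, 2], [0, 2, 3]]))  # [[0, 1, 1], [1, 2, 2]]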
| 2 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _a ( self : str ) -> str:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : Any ) -> int:
__UpperCAmelCase ={
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> List[str]:
__UpperCAmelCase =DPTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase =model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
__UpperCAmelCase =self.num_labels
__UpperCAmelCase =DPTForDepthEstimation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase =model(__lowerCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
__UpperCAmelCase =self.num_labels
__UpperCAmelCase =DPTForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase =model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _a ( self : Dict ) -> List[str]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase =DPTModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _a ( self : Any ) -> str:
pass
def _a ( self : str ) -> Any:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__lowerCAmelCase )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def _a ( self : str ) -> List[Any]:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _a ( self : Any ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase )
def _a ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
def _a ( self : Optional[int] ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
if model_class in get_values(__lowerCAmelCase ):
continue
__UpperCAmelCase =model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
__UpperCAmelCase =self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__UpperCAmelCase =model(**__lowerCAmelCase ).loss
loss.backward()
def _a ( self : List[str] ) -> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =False
__UpperCAmelCase =True
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
__UpperCAmelCase =model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase =self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__UpperCAmelCase =model(**__lowerCAmelCase ).loss
loss.backward()
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__lowerCAmelCase )
# Skip the check for the backbone
__UpperCAmelCase =[]
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__UpperCAmelCase =[f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Any ) -> Optional[int]:
pass
@slow
def _a ( self : Optional[int] ) -> Union[str, Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__UpperCAmelCase =DPTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _a ( self : str ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase ="""add"""
with self.assertRaises(__lowerCAmelCase ):
__UpperCAmelCase =DPTForDepthEstimation(__lowerCAmelCase )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
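# Added example (a sketch; downloads the DPT weights on first use): the same
# checkpoint exercised above, driven through the high-level pipeline API.
def _demo_dpt_depth_pipeline():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image of the predicted depth map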
| 68 |
def sum_of_proper_divisors( input_num: int ) -> int:
    # The function name is descriptive, chosen by the editor; the original was anonymized.
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
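    # Added example (a sketch): 28 is a perfect number, so the sum of its proper
    # divisors (1 + 2 + 4 + 7 + 14) equals 28.
    print(sum_of_proper_divisors(28))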
| 2 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : Optional[Any] = None
if self.use_labels:
__A : str = ids_tensor([self.batch_size] , self.num_labels)
__A : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ConvNextVaModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
__A : Dict = model(__lowerCAmelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = ConvNextVaForImageClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
__A : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = ConvNextVaBackbone(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
__A : Union[str, Any] = model(__lowerCAmelCase)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
__A : List[str] = None
__A : Tuple = ConvNextVaBackbone(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
__A : List[str] = model(__lowerCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : str = {'pixel_values': pixel_values}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.prepare_config_and_inputs()
__A ,__A ,__A : List[str] = config_and_inputs
__A : int = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = ConvNextVaModelTester(self)
__A : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__A ,__A : Any = self.model_tester.prepare_config_and_inputs_with_labels()
__A : List[str] = True
if model_class.__name__ in [
*get_values(__lowerCAmelCase),
*get_values(__lowerCAmelCase),
]:
continue
__A : Any = model_class(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.train()
__A : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase)
__A : Any = model(**__lowerCAmelCase).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
__A : List[Any] = False
__A : str = True
if (
model_class.__name__
in [*get_values(__lowerCAmelCase), *get_values(__lowerCAmelCase)]
or not model_class.supports_gradient_checkpointing
):
continue
__A : str = model_class(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.gradient_checkpointing_enable()
model.train()
__A : Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase)
__A : Any = model(**__lowerCAmelCase).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(__lowerCAmelCase)
__A : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : str = [*signature.parameters.keys()]
__A : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = model_class(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
with torch.no_grad():
__A : int = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase))
__A : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__A ,__A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Union[str, Any] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = ConvNextVaModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224').to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 8 |
UpperCAmelCase_ = 2_5_6
# Modulus to hash a string
UpperCAmelCase_ = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str ) -> bool:
_A = len(_snake_case )
_A = len(_snake_case )
if p_len > t_len:
return False
_A = 0
_A = 0
_A = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
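# Hedged usage sketch (assumption: the matcher above is exposed as
# rabin_karp(pattern, text), the de-obfuscated name used by the test below).
# The rolling hash makes each window update O(1), so the average cost is
# O(len(text) + len(pattern)):
#   rabin_karp("abc", "xyzabc")   # -> True, hash hit confirmed by direct compare
#   rabin_karp("abd", "xyzabc")   # -> False, no window ever matches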
def SCREAMING_SNAKE_CASE_ ( ) -> None:
_A = '''abc1abc12'''
_A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
_A = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
# Test 2)
_A = '''ABABX'''
_A = '''ABABZABABYABABX'''
assert rabin_karp(_snake_case , _snake_case )
# Test 3)
_A = '''AAAB'''
_A = '''ABAAAAAB'''
assert rabin_karp(_snake_case , _snake_case )
# Test 4)
_A = '''abcdabcy'''
_A = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(_snake_case , _snake_case )
# Test 5)
_A = '''Lü'''
_A = '''Lüsai'''
assert rabin_karp(_snake_case , _snake_case )
_A = '''Lue'''
assert not rabin_karp(_snake_case , _snake_case )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 2 | 0 |
'''simple docstring'''
__lowerCAmelCase = [
(1_0_0_0, """M"""),
(9_0_0, """CM"""),
(5_0_0, """D"""),
(4_0_0, """CD"""),
(1_0_0, """C"""),
(9_0, """XC"""),
(5_0, """L"""),
(4_0, """XL"""),
(1_0, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[int] = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
_a : List[str] = 0
_a : Any = 0
while place < len(_snake_case ):
if (place + 1 < len(_snake_case )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
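# Worked example for the Roman-to-integer conversion above: "MCMXCIV" is read
# as M (1000) + CM (900, taken because C < M) + XC (90) + IV (4) = 1994; the
# look-ahead at `place + 1` is what handles the subtractive pairs.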
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : int = []
for arabic, roman in ROMAN:
((_a), (_a)) : Union[str, Any] = divmod(_snake_case , _snake_case )
result.append(roman * factor )
if number == 0:
break
return "".join(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Dict = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
_A = do_lower_case
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(__lowerCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_A = {}
@property
def snake_case_ ( self : List[str] ) -> int:
return len(self.decoder )
def snake_case_ ( self : Dict ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__lowerCAmelCase ):
try:
_A = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__lowerCAmelCase )
_A = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_A = get_pairs(__lowerCAmelCase )
_A = ''' '''.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
_A = word.replace(__lowerCAmelCase , '''''' )
_A = word.replace(''' ''' , __lowerCAmelCase )
_A = word
return word
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
                ''' Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
_A = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
_A = ''' '''.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
_A = ''''''.join(string.split(__lowerCAmelCase ) )
return string
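    # Worked example for the decoder above (assumption: the split argument is
    # the module-level "@@ " continuation marker): the tokens
    # ["hel@@", "lo", "wor@@", "ld"] first join to "hel@@ lo wor@@ ld", and
    # removing every "@@ " yields "hello world".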
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_A = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 2 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = 1
__snake_case : int = 2
while i * i <= n:
__snake_case : int = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
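# Sanity check for the divisor counter above: 28 = 2**2 * 7, so the exponents
# are (2, 1) and the count is (2 + 1) * (1 + 1) = 6, matching
# {1, 2, 4, 7, 14, 28}. The search below then looks for the first triangular
# number with more than 500 divisors (Project Euler problem 12).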
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = 1
__snake_case : Any = 1
while True:
i += 1
t_num += i
if count_divisors(_snake_case ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 26 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 2
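# Index arithmetic sketch for the array-backed heap below: with the root at
# index 0, node 4 has parent (4 - 1) // 2 = 1 and children 2*4 + 1 = 9 and
# 2*4 + 2 = 10, so the three helpers above are mutually consistent
# (get_parent_position(9) == get_parent_position(10) == 4).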
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
_A = []
_A = {}
_A = 0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def snake_case_ ( self : str ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_A = self.elements
self.elements += 1
self._bubble_up(__lowerCAmelCase )
def snake_case_ ( self : Tuple ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_A , _A = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_A , _A = self.heap[0]
self._bubble_down(__lowerCAmelCase )
return elem
def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Update the weight of the given key
_A = self.position_map[elem]
_A = (elem, weight)
if position > 0:
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_A = self.position_map[elem]
if curr_pos == 0:
return None
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[curr_pos]
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_up(__lowerCAmelCase )
return None
def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_A = self.position_map[elem]
_A , _A = self.heap[curr_pos]
_A = get_child_left_position(__lowerCAmelCase )
_A = get_child_right_position(__lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
_A , _A = self.heap[child_left_position]
_A , _A = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
if child_left_position < self.elements:
_A , _A = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
_A , _A = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
return None
def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
# Swap the nodes at the given positions
_A = self.heap[nodea_pos][0]
_A = self.heap[nodea_pos][0]
_A , _A = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_A = nodea_pos
_A = nodea_pos
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : str ) -> None:
_A = {}
_A = 0
def __repr__( self : str ) -> str:
return str(self.connections )
def __len__( self : Dict ) -> int:
return self.nodes
def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_A = {}
self.nodes += 1
def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
_A = weight
_A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
_A = {node: maxsize for node in graph.connections}
_A = {node: None for node in graph.connections}
_A = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_snake_case , _snake_case )
if priority_queue.is_empty():
return dist, parent
# initialization
_A = priority_queue.extract_min()
_A = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
# running prim's algorithm
while not priority_queue.is_empty():
_A = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
return dist, parent
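# Hedged usage sketch (assumption: with de-obfuscated names, the class above
# is GraphUndirectedWeighted and the function is prims_algo(graph)):
#   g = GraphUndirectedWeighted()
#   g.add_edge("a", "b", 3)
#   g.add_edge("b", "c", 1)
#   g.add_edge("a", "c", 5)
#   dist, parent = prims_algo(g)  # parent encodes the MST: b <- a, c <- b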
| 2 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
A : Optional[int] = logging.getLogger(__name__)
def __lowerCamelCase ( __a :torch.nn.Module , __a :BnbQuantizationConfig , __a :Union[str, os.PathLike] = None , __a :Optional[Dict[str, Union[int, str, torch.device]]] = None , __a :Optional[List[str]] = None , __a :Optional[Dict[Union[int, str], Union[int, str]]] = None , __a :Optional[Union[str, os.PathLike]] = None , __a :bool = False , ) -> Dict:
"""simple docstring"""
A__ = bnb_quantization_config.load_in_abit
A__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A__ = []
# custom device map
if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1:
A__ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
A__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ = []
A__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
A__ = load_in_abit
A__ = load_in_abit
A__ = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A__ = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case )
# convert param to the right dtype
A__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A__ = getattr(_snake_case , _snake_case , _snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
A__ = replace_with_bnb_layers(
_snake_case , _snake_case , modules_to_not_convert=_snake_case )
A__ = get_quantized_model_device_map(
_snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ = True
A__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case )
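# Hedged usage sketch (assumption: the entry point above is accelerate's
# load_and_quantize_model, and BnbQuantizationConfig exposes load_in_8bit /
# load_in_4bit flags that the obfuscation above collapses into load_in_abit):
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint"
#   )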
def __lowerCamelCase ( __a :Optional[int] , __a :Optional[Any] , __a :Tuple=None , __a :Union[str, Any]=None , __a :Tuple=None ) -> Optional[Any]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A__ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{\'\':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case , _snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or """
"""\'sequential\'.""" )
A__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ = {}
A__ = special_dtypes
A__ = no_split_module_classes
A__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ = get_balanced_memory(
_snake_case , low_zero=(device_map == """balanced_low_0""") , max_memory=_snake_case , **_snake_case , )
A__ = max_memory
A__ = infer_auto_device_map(_snake_case , **_snake_case )
if isinstance(_snake_case , _snake_case ):
# check if don't have any quantized module on the cpu
A__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( __a :Any , __a :Tuple , __a :Dict=None , __a :Optional[Any]=None ) -> int:
"""simple docstring"""
if modules_to_not_convert is None:
A__ = []
A__ , A__ = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __lowerCamelCase ( __a :str , __a :Any , __a :List[str]=None , __a :Optional[int]=None , ) -> int:
"""simple docstring"""
A__ = False
for name, module in model.named_children():
if current_key_name is None:
A__ = []
current_key_name.append(_snake_case )
if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ = """.""".join(_snake_case )
A__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can\'t be both False""" )
A__ = module.weight.data
if module.bias is not None:
A__ = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case , _snake_case , _snake_case )
A__ = True
if len(list(module.children() ) ) > 0:
A__ , A__ = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
A__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCamelCase ( __a :Tuple ) -> List[str]:
"""simple docstring"""
with init_empty_weights():
A__ = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A__ = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case , _snake_case ):
A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ = sum(_snake_case , [] )
A__ = len(_snake_case ) > 0
# Check if it is a base model
A__ = False
if hasattr(_snake_case , """base_model_prefix""" ):
A__ = not hasattr(_snake_case , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ = list(model.named_children() )
A__ = [list_modules[-1][0]]
# add last module together with tied weights
A__ = set(_snake_case ) - set(_snake_case )
A__ = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
A__ = [""".weight""", """.bias"""]
A__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ = name.replace(_snake_case , """""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def __lowerCamelCase ( __a :int ) -> Union[str, Any]:
"""simple docstring"""
for m in model.modules():
if isinstance(_snake_case , bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( __a :nn.Module ) -> int:
"""simple docstring"""
return next(parameter.parameters() ).device
def __lowerCamelCase ( __a :Any , __a :Optional[Any] , __a :Optional[int] , __a :Optional[int] , __a :List[str] , __a :Dict , __a :Optional[int] ) -> Dict:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case )
A__ = param_name
A__ = model
if "." in tensor_name:
A__ = tensor_name.split(""".""" )
for split in splits[:-1]:
A__ = getattr(_snake_case , _snake_case )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
A__ = new_module
A__ = splits[-1]
# offload weights
A__ = False
offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _snake_case , index=_snake_case , )
else:
offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case )
offload_weight(_snake_case , param_name.replace("""weight""" , """SCB""" ) , _snake_case , index=_snake_case )
set_module_tensor_to_device(_snake_case , _snake_case , """meta""" , dtype=_snake_case , value=torch.empty(*param.size() ) )
| 176 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = vocab_file
_A = monolingual_vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_A = {}
_A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = cnt
cnt += 1
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_A = line.strip().split()[0]
_A = len(self.fairseq_tokens_to_ids )
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = len(self.fairseq_tokens_to_ids )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> List[Any]:
_A = self.__dict__.copy()
_A = None
_A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
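    # Format sketch: as the concatenation above shows, a single sequence
    # becomes <s> A </s> and a pair becomes <s> A </s></s> B </s>, the same
    # RoBERTa-style layout the mask and token-type helpers below assume.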
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
_A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 2 | 0 |
"""simple docstring"""
from functools import reduce
lowerCAmelCase__ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowercase__ ( lowerCamelCase = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase, lowerCamelCase : str(int(_snake_case ) * int(_snake_case ) ), n[i : i + 13] ) )
for i in range(len(_snake_case ) - 12 ) )
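# The search above slides a 13-digit window over the 1000-digit string and
# multiplies the window's digits (Project Euler problem 8); the outer int()
# around reduce only exists to satisfy mypy, as the inline comment notes.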
if __name__ == "__main__":
print(F'{solution() = }')
| 621 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( _snake_case :dict , _snake_case :str ) -> set[str]:
_A , _A = set(_snake_case ), [start]
while stack:
_A = stack.pop()
explored.add(_snake_case )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_snake_case )
return explored
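# Trace note for the iterative DFS above: popping from the end of `stack`
# while pushing neighbours in reversed order visits adjacency lists left to
# right, so for the graph G below a start at "A" explores the vertices in the
# order A, B, D, E, F, C, G.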
UpperCAmelCase_ = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 2 | 0 |
from maths.prime_factors import prime_factors
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not isinstance(_snake_case , _snake_case):
lowerCamelCase_ : int = F"""Input value of [number={number}] must be an integer"""
raise TypeError(_snake_case)
if number < 1:
raise ValueError("Input must be a positive integer")
return -1 if len(prime_factors(_snake_case)) % 2 else 1
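# Worked example for the Liouville function above: prime_factors returns the
# factors with multiplicity, so 12 -> [2, 2, 3] (odd count) gives -1, while
# 10 -> [2, 5] (even count) gives 1.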
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : Tuple = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Any = "xlnet"
a__ : Dict = ["mems"]
a__ : List[str] = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = n_layer
_A = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A = d_model // n_head
_A = ff_activation
_A = d_inner
_A = untie_r
_A = attn_type
_A = initializer_range
_A = layer_norm_eps
_A = dropout
_A = mem_len
_A = reuse_len
_A = bi_data
_A = clamp_len
_A = same_length
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_last_dropout
_A = start_n_top
_A = end_n_top
_A = bos_token_id
_A = pad_token_id
_A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , __lowerCAmelCase , )
_A = kwargs['''use_cache''']
_A = use_mems_eval
_A = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
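# Hedged usage sketch (assumption: the class above corresponds to
# transformers' XLNetConfig):
#   config = XLNetConfig(vocab_size=32_000, d_model=1_024, n_head=16)
#   config.d_head  # -> 64, derived as d_model // n_head in __init__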
| 2 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : List[str]=18 , _lowerCAmelCase : str=30 , _lowerCAmelCase : Tuple=400 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : str=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=[0.5, 0.5, 0.5] , _lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = size if size is not None else {"""shortest_edge""": 18}
__lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( _A , unittest.TestCase ):
__snake_case :str = LevitImageProcessor if is_vision_available() else None
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = LevitImageProcessingTester(self )
@property
def _a ( self : str ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 80 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :bytes ) -> str:
return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(_snake_case ) % 2) != 0:
raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) )
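# Hedged usage sketch (assumption: the two functions above are
# base16_encode(data: bytes) -> str and base16_decode(data: str) -> bytes):
#   base16_encode(b"Hello")      # -> "48656C6C6F"
#   base16_decode("48656C6C6F")  # -> b"Hello"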
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class snake_case_ ( _A ):
A_ = "xlnet"
A_ = ["mems"]
A_ = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , _snake_case : Dict=32000 , _snake_case : List[str]=1024 , _snake_case : Dict=24 , _snake_case : Optional[Any]=16 , _snake_case : Dict=4096 , _snake_case : Any="gelu" , _snake_case : int=True , _snake_case : List[str]="bi" , _snake_case : Dict=0.02 , _snake_case : Union[str, Any]=1E-12 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=512 , _snake_case : List[str]=None , _snake_case : Tuple=True , _snake_case : Tuple=False , _snake_case : Optional[Any]=False , _snake_case : Union[str, Any]=-1 , _snake_case : Optional[Any]=False , _snake_case : Any="last" , _snake_case : List[Any]=True , _snake_case : Tuple="tanh" , _snake_case : int=0.1 , _snake_case : str=5 , _snake_case : str=5 , _snake_case : List[str]=5 , _snake_case : List[str]=1 , _snake_case : Optional[int]=2 , **_snake_case : List[str] , )->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : str = d_model
__lowerCAmelCase : int = n_layer
__lowerCAmelCase : str = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
__lowerCAmelCase : List[str] = d_model // n_head
__lowerCAmelCase : Optional[Any] = ff_activation
__lowerCAmelCase : str = d_inner
__lowerCAmelCase : List[Any] = untie_r
__lowerCAmelCase : Dict = attn_type
__lowerCAmelCase : int = initializer_range
__lowerCAmelCase : List[Any] = layer_norm_eps
__lowerCAmelCase : Tuple = dropout
__lowerCAmelCase : str = mem_len
__lowerCAmelCase : Tuple = reuse_len
__lowerCAmelCase : Dict = bi_data
__lowerCAmelCase : Optional[Any] = clamp_len
__lowerCAmelCase : List[Any] = same_length
__lowerCAmelCase : List[str] = summary_type
__lowerCAmelCase : Optional[int] = summary_use_proj
__lowerCAmelCase : int = summary_activation
__lowerCAmelCase : Any = summary_last_dropout
__lowerCAmelCase : str = start_n_top
__lowerCAmelCase : List[Any] = end_n_top
__lowerCAmelCase : str = bos_token_id
__lowerCAmelCase : Any = pad_token_id
__lowerCAmelCase : Optional[int] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , __lowerCAmelCase , )
__lowerCAmelCase : List[str] = kwargs["""use_cache"""]
__lowerCAmelCase : Union[str, Any] = use_mems_eval
__lowerCAmelCase : int = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Union[str, Any]:
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def UpperCAmelCase__ ( self : Tuple , _snake_case : Optional[Any] )->Dict:
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) | 504 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool:
if not isinstance(_snake_case , _snake_case ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(_snake_case ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(_snake_case ) == 1:
return True
_A = series[1] - series[0]
for index in range(len(_snake_case ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> float:
if not isinstance(_snake_case , _snake_case ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(_snake_case ) == 0:
raise ValueError('''Input list must be a non empty list''' )
_A = 0
for val in series:
answer += val
return answer / len(_snake_case )
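# Worked examples for the two helpers above: [2, 4, 6] has a common
# difference of 2, so the series check passes, and its mean is
# (2 + 4 + 6) / 3 = 4.0; [2, 4, 7] fails because 7 - 4 != 4 - 2.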
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] )
def __UpperCAmelCase ( __UpperCamelCase ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(_snake_case ) % 2) != 0:
raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> qiskit.result.counts.Counts:
if isinstance(_snake_case , _snake_case ):
        raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
_A = QuantumRegister(_snake_case , '''qr''' )
_A = ClassicalRegister(_snake_case , '''cr''' )
_A = QuantumCircuit(_snake_case , _snake_case )
_A = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
_A = Aer.get_backend('''qasm_simulator''' )
_A = execute(_snake_case , _snake_case , shots=10_000 )
return job.result().get_counts(_snake_case )
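# Interpretation note: applied to the all-zeros input state |000>, the QFT
# yields an equal superposition, so the 10_000 shots below should spread
# roughly uniformly across all 2**3 = 8 measurement outcomes.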
if __name__ == "__main__":
print(
        f'Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}'
)
| 2 | 0 |
def lowercase__ ( A_: list , A_: list , A_: int , A_: int , A_: int ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
__UpperCAmelCase =0
__UpperCAmelCase =0
__UpperCAmelCase =knapsack(_snake_case , _snake_case , _snake_case , _snake_case , index + 1 )
if weights[index] <= max_weight:
__UpperCAmelCase =values[index] + knapsack(
_snake_case , _snake_case , _snake_case , max_weight - weights[index] , index + 1 )
return max(_snake_case , _snake_case )
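# Worked example for the recursive 0/1 knapsack above (assumption: with
# de-obfuscated names the signature is knapsack(weights, values,
# number_of_items, max_weight, index)): for weights [10, 20, 30], values
# [60, 100, 120] and max_weight 50, starting at index 0, the optimum skips
# the first item and takes the other two for a total value of 220.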
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
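# Example invocation (added as a sketch; file names below are placeholders, not real paths):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned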
| 2 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError('Not supported' )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx : int , dims : Tuple[int, ...] ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
@torch.jit.ignore
def _get_minimal_slice_set( start : Sequence[int] , end : Sequence[int] , dims : Sequence[int] , start_edges : Optional[Sequence[bool]] = None , end_edges : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l : List[bool] ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def _chunk_slice( t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer( layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ) -> Any:
    if not (len(inputs ) > 0):
        raise ValueError('Must provide at least one input' )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )

    def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(d1 : dict , d2 : dict ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for x1, x2 in zip(out , output_chunk ):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported' )
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
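# Usage sketch (added, not part of the original module): with
# inputs = {"x": torch.randn(7, 3)},
# chunk_layer(lambda x: {"out": x * 2}, inputs, chunk_size=4, no_batch_dims=1)
# runs the lambda on rows 0-3 and rows 4-6 separately, then stitches the pieces
# back into a single {"out": tensor of shape (7, 3)} result.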
class ChunkSizeTuner:
    def __init__( self , max_chunk_size = 512 , ):
        '''simple docstring'''
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size( self , fn , args , min_chunk_size ):
        '''simple docstring'''
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches( self , ac1 , ac2 ):
        '''simple docstring'''
        consistent = True
        for a1, a2 in zip(ac1 , ac2):
            assert type(a1) == type(a2)
            if isinstance(a1 , (list, tuple)):
                consistent &= self._compare_arg_caches(a1 , a2)
            elif isinstance(a1 , dict):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items , a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size( self , representative_fn , args , min_chunk_size , ):
        '''simple docstring'''
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , args , object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data)
        else:
            # No cache yet, so we must tune from scratch
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
 | 8 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any:
pass
@is_pipeline_test
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@require_torch
def snake_case_ ( self : Tuple ) -> Tuple:
_A = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def snake_case_ ( self : int ) -> Optional[int]:
_A = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
_A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def snake_case_ ( self : Optional[int] ) -> int:
_A = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case_ ( self : Optional[int] ) -> Dict:
_A = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 2 | 0 |
'''simple docstring'''
__lowerCAmelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm (equation : str ):
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__lowerCAmelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
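    # Second quick check (added): single-digit operands only, as the digit rule above requires.
    print(f'''(2 + (3 * 4)) = {dijkstras_two_stack_algorithm('(2 + (3 * 4))')}''')  # 14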
| 229 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Tuple ) -> Optional[int]:
_A = tempfile.mkdtemp()
_A = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_A = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_A = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : int ) -> Optional[Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : Dict ) -> List[str]:
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = self.get_image_processor()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def snake_case_ ( self : List[Any] ) -> List[str]:
_A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def snake_case_ ( self : str ) -> List[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = processor(text=__lowerCAmelCase )
_A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def snake_case_ ( self : Optional[Any] ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__lowerCAmelCase )
_A = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra( grid , source , destination , allow_diagonal , ) -> tuple[float | int, list[tuple[int, int]]]:
    """simple docstring"""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
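    # Minimal demo (added): shortest path across an open 3x3 grid of 1s.
    demo_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 2), False))  # (4.0, [(0, 0), ..., (2, 2)])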
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "openai-gpt"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
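# Example (added as a sketch): the defaults reproduce the original GPT sizes.
# config = OpenAIGPTConfig()  # vocab 40478, 512 positions, 768 hidden, 12 layers, 12 heads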
| 2 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
'''simple docstring'''
@staticmethod
def a_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class A (unittest.TestCase ):
'''simple docstring'''
@require_torch
def a_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
A__ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}],
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """c"""}, {"""score""": 0.3_3_3, """label""": """b"""}],
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def a_ ( self : int ) -> Optional[int]:
"""simple docstring"""
A__ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
A__ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def a_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
| 176 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def snake_case_ ( self : Optional[int] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict:
if not batched:
_A = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['''shortest_edge'''] * h / w )
_A = self.size['''shortest_edge''']
elif w > h:
_A = self.size['''shortest_edge''']
_A = int(self.size['''shortest_edge'''] * w / h )
else:
_A = self.size['''shortest_edge''']
_A = self.size['''shortest_edge''']
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[int] ) -> Any:
_A = DeformableDetrImageProcessingTester(self )
@property
def snake_case_ ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify masks
_A = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k
def _set( k, v ):
    return setitem, k, v
def _del( k ):
    return delitem, k
def _run_operation( obj, fun, *args ):
    try:
        return fun(obj, *args ), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations', (
pytest.param(_add_items, id='add items' ),
pytest.param(_overwrite_items, id='overwrite items' ),
pytest.param(_delete_items, id='delete items' ),
pytest.param(_access_absent_items, id='access absent items' ),
pytest.param(_add_with_resize_up, id='add with resize up' ),
pytest.param(_add_with_resize_down, id='add with resize down' ),
), )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my, fun, *args )
        py_res, py_exc = _run_operation(py, fun, *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(py ) == set(my )
        assert len(py ) == len(my )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api( ):
    def is_public(name ) -> bool:
        return not name.startswith('_' )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
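# Run (added): assuming this module is saved as test_hash_map.py,
# `pytest test_hash_map.py` replays every parametrized operation sequence
# against a plain dict as the reference implementation.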
| 621 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence :list ) -> list:
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(f'{dutch_national_flag_sort(unsorted)}')
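# Example (added): dutch_national_flag_sort([2, 0, 1, 2, 0]) returns [0, 0, 1, 2, 2].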
| 2 | 0 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
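# Illustrative note (module path illustrative): because _LazyModule replaces this
# module in sys.modules, a statement like `from ...mmbt import MMBTConfig` only
# triggers the real import of configuration_mmbt at first attribute access,
# keeping `import transformers` cheap when torch is not installed.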
| 250 |
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
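# Illustrative examples: solution(6) == 13 (the sixth prime: 2, 3, 5, 7, 11, 13),
# and the default solution() == 104_743, the 10_001st prime (Project Euler problem 7).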
| 2 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __init__( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =process
_SCREAMING_SNAKE_CASE =params
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.dataset[i]
_SCREAMING_SNAKE_CASE =self.process(__lowerCAmelCase , **self.params )
return processed
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A=None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =loader
_SCREAMING_SNAKE_CASE =infer
_SCREAMING_SNAKE_CASE =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =loader_batch_size
# Internal bookkeeping
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
def __len__( self ):
'''simple docstring'''
return len(self.loader )
def __iter__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =iter(self.loader )
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_SCREAMING_SNAKE_CASE =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_SCREAMING_SNAKE_CASE ={}
for k, element in self._loader_batch_data.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# Convert ModelOutput to tuple first
_SCREAMING_SNAKE_CASE =element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_SCREAMING_SNAKE_CASE =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE =element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE =np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_SCREAMING_SNAKE_CASE =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_SCREAMING_SNAKE_CASE =self._loader_batch_data.__class__(__lowerCAmelCase )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_SCREAMING_SNAKE_CASE =next(self.iterator )
_SCREAMING_SNAKE_CASE =self.infer(__lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__lowerCAmelCase , torch.Tensor ):
_SCREAMING_SNAKE_CASE =processed
else:
_SCREAMING_SNAKE_CASE =list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE =processed[key]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_SCREAMING_SNAKE_CASE =len(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE =observed_batch_size
# Setting internal index to unwrap the batch
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A=None ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __iter__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =iter(self.loader )
_SCREAMING_SNAKE_CASE =None
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.subiterator is None:
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_SCREAMING_SNAKE_CASE =next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
_SCREAMING_SNAKE_CASE =next(self.subiterator )
return processed
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __iter__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =iter(self.loader )
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE =self.loader_batch_item()
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(__lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__lowerCAmelCase , torch.Tensor ):
_SCREAMING_SNAKE_CASE =processed
else:
_SCREAMING_SNAKE_CASE =list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE =processed[key]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_SCREAMING_SNAKE_CASE =len(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE =observed_batch_size
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =0
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE =self.loader_batch_item()
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(__lowerCAmelCase )
if is_last:
return accumulator
else:
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(__lowerCAmelCase )
return accumulator
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __init__( self , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =key
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , _A ):
'''simple docstring'''
return self.dataset[i][self.key]
class __UpperCAmelCase ( _A ):
'''simple docstring'''
def __init__( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =keya
_SCREAMING_SNAKE_CASE =keya
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , _A ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
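# Illustrative usage sketch (dataset and pipeline names hypothetical): KeyDataset
# lets a pipeline stream a single column of a larger dataset without materializing it:
#
#     for out in pipe(KeyDataset(dataset, "text"), batch_size=8):
#         ...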
| 255 |
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure` and under `TYPE_CHECKING`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
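# Illustrative example: find_backend('    if not is_torch_available():') -> "torch"
# (the guard line matches _re_test_backend, and _re_backend extracts the backend name).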
| 2 | 0 |
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
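# Note (illustrative): resetting the CUDA peak-memory statistics before each
# sub-pipeline run is what makes the `torch.cuda.max_memory_allocated()`
# assertions above measure every stage in isolation rather than cumulatively.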
| 80 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
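# Illustrative usage sketch (model checkpoint hypothetical):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="some/captioning-checkpoint")
#     captioner("photo.jpg")  # -> [{"generated_text": "..."}]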
| 2 | 0 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 504 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
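# Note: the CSS class string above is brittle; Yahoo Finance markup changes
# often, so the class name may need updating for this scraper to keep working.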
| 2 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
a_ = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def __UpperCAmelCase ( __UpperCamelCase = "dhaka" , __UpperCamelCase = 5 ):
__lowercase : int = min(_snake_case , 50 ) # Prevent abuse!
__lowercase : Tuple = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
__lowercase : List[Any] = requests.get('''https://www.google.com/search''' , params=_snake_case , headers=_snake_case )
__lowercase : Union[str, Any] = BeautifulSoup(html.text , '''html.parser''' )
__lowercase : str = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
__lowercase : Optional[Any] = json.dumps(_snake_case )
__lowercase : Optional[int] = json.loads(_snake_case )
__lowercase : Union[str, Any] = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , _snake_case , )
if not matched_google_image_data:
return 0
__lowercase : List[Any] = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(_snake_case ) , )
__lowercase : Union[str, Any] = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , _snake_case , )
for index, fixed_full_res_image in enumerate(_snake_case ):
if index >= max_images:
return index
__lowercase : str = bytes(_snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
__lowercase : Optional[int] = bytes(_snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
__lowercase : List[str] = urllib.request.build_opener()
__lowercase : Optional[Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(_snake_case )
__lowercase : Optional[Any] = f"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_snake_case ):
os.makedirs(_snake_case )
urllib.request.urlretrieve( # noqa: S310
_snake_case , f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
a_ = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print('Please provide a search term.')
raise
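# Example CLI usage (illustrative, script filename hypothetical):
#
#     python download_images_from_google_query.py "red panda"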
| 76 |
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
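# Sanity check (illustrative): the expected MST above has total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.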
| 2 | 0 |
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"A red cartoon frog, 4k\"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to(\"cuda\")\n\n        >>> init_image = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/frog.png\"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save(\"red_frog.png\")\n        ```\n"


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
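# Illustrative example: downscale_height_and_width(512, 512, scale_factor=8)
# returns (64, 64), the latent resolution at which the MoVQ autoencoder operates.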
| 68 |
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
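# Illustrative example: sum_of_divisors(28) == 28 (1 + 2 + 4 + 7 + 14), so 28 is
# a perfect number.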
| 2 | 0 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
__A : str = model_mapping[yolos_name]
image_processor.push_to_hub(_snake_case , organization='hustvl' )
model.push_to_hub(_snake_case , organization='hustvl' )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase__ : int = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 8 |
# Number of symbols in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
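    # Hedged illustration (not in the original tests): one step of the rolling
    # hash used above - the hash of "bc" follows from the hash of "ab" in O(1)
    # by removing the leading character and appending the trailing one.
    base, mod = 256, 1000003
    h_ab = (ord("a") * base + ord("b")) % mod
    h_bc = ((h_ab - ord("a") * base) * base + ord("c")) % mod
    assert h_bc == (ord("b") * base + ord("c")) % mod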
| 2 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution():
    """Return the smallest odd composite that violates Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
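    # Hedged sanity check (not in the original): 33 = 31 + 2*1**2, so 33 has a
    # valid decomposition and must not be reported. The `while ... else` above
    # relies on Python's loop-else semantics - `else` runs only when the loop
    # ends without hitting `break`.
    assert is_prime(33 - 2 * 1 * 1)
    assert 33 not in compute_nums(1)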
| 229 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 2 | 0 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies the pinned requirement."""
    require_version(deps[pkg], hint)
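# Hedged usage sketch (not part of the original module): downstream code can
# re-check a single pinned dependency on demand, e.g.
#
#     dep_version_check("tokenizers")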
| 26 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by weight, with O(log n) pushes and updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodeb_pos: int) -> None:
        # Swap the nodes at the given positions
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, nodea: T, nodeb: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(nodea)
        self.add_node(nodeb)
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight


def prims_algorithm(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
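if __name__ == "__main__":
    # Hedged example (not part of the original module): a weighted 4-cycle; the
    # minimum spanning tree must drop the heaviest edge (2 - 3, weight 10).
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 3)
    graph.add_edge(2, 3, 10)
    graph.add_edge(3, 4, 5)
    graph.add_edge(4, 1, 8)
    dist, parent = prims_algorithm(graph)
    print(dist)
    print(parent)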
| 2 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 176 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """Constructs a BARTpho tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_b: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )

        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
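# Hedged usage sketch (not part of the original module; downloads pretrained
# files from the Hub when run, and the example sentence is illustrative):
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])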
| 2 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _lowerCAmelCase ( unittest.TestCase ):
def A ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Optional[int] = SamImageProcessor()
_SCREAMING_SNAKE_CASE : Union[str, Any] = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A ( self , **lowerCAmelCase_ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def A ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Tuple = image_processor(__lowerCAmelCase , return_tensors='np' )
_SCREAMING_SNAKE_CASE : Dict = processor(images=__lowerCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def A ( self ) -> Any:
_SCREAMING_SNAKE_CASE : str = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [torch.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : int = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor.post_process_masks(
__lowerCAmelCase , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
_SCREAMING_SNAKE_CASE : Optional[Any] = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Dict = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
def A ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Union[str, Any] = SamImageProcessor()
_SCREAMING_SNAKE_CASE : Dict = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A ( self , **lowerCAmelCase_ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def A ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Any = image_processor(__lowerCAmelCase , return_tensors='np' )
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__lowerCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def A ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : List[str] = [tf.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : Optional[int] = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Optional[Any] = processor.post_process_masks(
__lowerCAmelCase , tf.convert_to_tensor(__lowerCAmelCase ) , tf.convert_to_tensor(__lowerCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
_SCREAMING_SNAKE_CASE : Any = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Optional[Any] = processor.post_process_masks(
__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(
__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class _lowerCAmelCase ( unittest.TestCase ):
def A ( self ) -> str:
_SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Optional[int] = SamImageProcessor()
_SCREAMING_SNAKE_CASE : Optional[Any] = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A ( self , **lowerCAmelCase_ ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def A ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : Optional[int] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def A ( self ) -> str:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Any = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_SCREAMING_SNAKE_CASE : Optional[Any] = [tf.convert_to_tensor(__lowerCAmelCase )]
_SCREAMING_SNAKE_CASE : int = [torch.tensor(__lowerCAmelCase )]
_SCREAMING_SNAKE_CASE : Any = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : int = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='tf' )
_SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def A ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Any = image_processor(__lowerCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=__lowerCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
_SCREAMING_SNAKE_CASE : int = image_processor(__lowerCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=__lowerCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
| 621 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
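    # Hedged extra check (not in the original): the sample graph is connected,
    # so a traversal from "A" must visit every vertex.
    assert depth_first_search(G, "A") == set(G)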
| 2 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
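# Hedged usage sketch (not part of the original module; the column name "speech"
# is hypothetical):
#
#     features = Features({"speech": Audio(), "transcription": Value("string")})
#     task = AutomaticSpeechRecognition(audio_column="speech")
#     task = task.align_with_features(features)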
| 250 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 0 |
"""simple docstring"""
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 1_2,
'''Pm''': 1_5,
'''Em''': 1_8,
'''Zm''': 2_1,
'''Ym''': 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
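    # Hedged example (not in the original): 4 kilometres expressed in metres.
    assert length_conversion(4, "kilometer", "meter") == 4000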
| 255 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
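# Hedged usage sketch (not part of the original module): `d_head` is derived in
# the constructor above as `d_model // n_head`.
#
#     config = XLNetConfig(d_model=64, n_head=4, n_layer=2)
#     assert config.d_head == 16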
| 2 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two numbers in sorted `nums` that add up to `target`."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
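    # Hedged extra check (not in the original): the returned indices must sum
    # to the target.
    nums, target = [2, 7, 11, 15], 9
    i, j = two_pointer(nums, target)
    assert nums[i] + nums[j] == target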
| 80 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
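    # Hedged round-trip example (not in the original file):
    encoded = base16_encode(b"Hello World!")
    assert base16_decode(encoded) == b"Hello World!"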
| 2 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
 | 504 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
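    # Hedged example (not in the original): [2, 4, 6] has common difference 2
    # and arithmetic mean 4.
    assert is_arithmetic_series([2, 4, 6])
    assert arithmetic_mean([2, 4, 6]) == 4.0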
| 2 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
def _lowerCamelCase ( self ) -> Any:
__lowercase : List[Any] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
__lowercase : Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowercase : Tuple = '''How many cats are there?'''
__lowercase : Optional[Any] = vqa_pipeline(image=__lowerCAmelCase , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )
__lowercase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )
@slow
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : str = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
__lowercase : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowercase : Union[str, Any] = '''How many cats are there?'''
__lowercase : Dict = vqa_pipeline(image=__lowerCAmelCase , question=__lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
__lowercase : Tuple = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
__lowercase : Any = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowerCamelCase ( self ) -> List[Any]:
pass
| 76 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
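    # Hedged check (not in the original): the QFT of |000> is uniform over all
    # 2**3 basis states, so each bitstring should show up among the 10000 shots.
    counts = quantum_fourier_transform(3)
    assert len(counts) == 2**3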
| 2 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def _a ( self : Optional[int] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str=False ) -> Dict:
if not batched:
__UpperCAmelCase =image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase =image.size
else:
__UpperCAmelCase , __UpperCAmelCase =image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase =int(self.size["""shortest_edge"""] * h / w )
__UpperCAmelCase =self.size["""shortest_edge"""]
elif w > h:
__UpperCAmelCase =self.size["""shortest_edge"""]
__UpperCAmelCase =int(self.size["""shortest_edge"""] * w / h )
else:
__UpperCAmelCase =self.size["""shortest_edge"""]
__UpperCAmelCase =self.size["""shortest_edge"""]
else:
__UpperCAmelCase =[]
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase =max(__lowerCAmelCase , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__UpperCAmelCase =max(__lowerCAmelCase , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
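# The helper above mirrors shortest-edge resizing: the smaller side is scaled
# to size["shortest_edge"] while the other side keeps the aspect ratio, e.g.
# a 640x480 COCO image with shortest_edge=800 resizes to 1066x800, the shape
# asserted in the slow integration tests below.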
@require_torch
@require_vision
class _A ( _A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = DeformableDetrImageProcessor if is_vision_available() else None
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase =DeformableDetrImageProcessingTester(self )
@property
def _a ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Optional[int] ) -> List[str]:
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_pad""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
def _a ( self : List[str] ) -> int:
__UpperCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
__UpperCAmelCase =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def _a ( self : Any ) -> Union[str, Any]:
pass
def _a ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
__UpperCAmelCase =image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Tuple ) -> int:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase =image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Optional[Any] ) -> int:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase =image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase =self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase =json.loads(f.read() )
__UpperCAmelCase ={"""image_id""": 39769, """annotations""": target}
# encode them
__UpperCAmelCase =DeformableDetrImageProcessor()
__UpperCAmelCase =image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __lowerCAmelCase )
__UpperCAmelCase =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCAmelCase , atol=1e-4 ) )
# verify area
__UpperCAmelCase =torch.tensor([5_887.9_600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCAmelCase ) )
# verify boxes
__UpperCAmelCase =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCAmelCase )
__UpperCAmelCase =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCAmelCase , atol=1e-3 ) )
# verify image_id
__UpperCAmelCase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCAmelCase ) )
# verify is_crowd
__UpperCAmelCase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCAmelCase ) )
# verify class_labels
__UpperCAmelCase =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCAmelCase ) )
# verify orig_size
__UpperCAmelCase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCAmelCase ) )
# verify size
__UpperCAmelCase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCAmelCase ) )
@slow
def _a ( self : List[str] ) -> List[str]:
# prepare image, target and masks_path
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase =json.loads(f.read() )
__UpperCAmelCase ={"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
__UpperCAmelCase =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__UpperCAmelCase =DeformableDetrImageProcessor(format="""coco_panoptic""" )
__UpperCAmelCase =image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __lowerCAmelCase )
__UpperCAmelCase =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCAmelCase , atol=1e-4 ) )
# verify area
__UpperCAmelCase =torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCAmelCase ) )
# verify boxes
__UpperCAmelCase =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCAmelCase )
__UpperCAmelCase =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCAmelCase , atol=1e-3 ) )
# verify image_id
__UpperCAmelCase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCAmelCase ) )
# verify is_crowd
__UpperCAmelCase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCAmelCase ) )
# verify class_labels
__UpperCAmelCase =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCAmelCase ) )
# verify masks
__UpperCAmelCase =822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __lowerCAmelCase )
# verify orig_size
__UpperCAmelCase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCAmelCase ) )
# verify size
__UpperCAmelCase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCAmelCase ) )
| 68 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
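# The "*" in the target names above is a per-layer wildcard:
# recursively_load_weights() substitutes the encoder layer index parsed from
# each fairseq parameter name, so e.g. "encoder.layers.3.fc1" lands in
# "encoder.layers.3.feed_forward.intermediate_dense".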
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]:
for attribute in key.split('''.''' ):
_A = getattr(_snake_case , _snake_case )
if weight_type is not None:
_A = getattr(_snake_case , _snake_case ).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any:
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_A = True
else:
for key, mapped_key in MAPPING.items():
_A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_A = True
if "*" in mapped_key:
_A = name.split(_snake_case )[0].split('''.''' )[-2]
_A = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight" in name:
_A = '''weight'''
elif "bias" in name:
_A = '''bias'''
else:
_A = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any:
_A = full_name.split('''conv_layers.''' )[-1]
_A = name.split('''.''' )
_A = int(items[0] )
_A = int(items[1] )
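    # fairseq stores feature-extractor weights as "conv_layers.<layer_id>.<type_id>.*";
    # here type_id 0 is the convolution itself and type_id 2 its normalization
    # layer (only present in the group-/layer-norm variants handled below).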
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple:
_A = SEWConfig()
if is_finetuned:
_A = model.wav_encoder.wav_model.cfg
else:
_A = model.cfg
_A = fs_config.conv_bias
_A = eval(fs_config.conv_feature_layers )
_A = [x[0] for x in conv_layers]
_A = [x[1] for x in conv_layers]
_A = [x[2] for x in conv_layers]
_A = '''gelu'''
_A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_A = 0.0
_A = fs_config.activation_fn.name
_A = fs_config.encoder_embed_dim
_A = 0.02
_A = fs_config.encoder_ffn_embed_dim
_A = 1E-5
_A = fs_config.encoder_layerdrop
_A = fs_config.encoder_attention_heads
_A = fs_config.conv_pos_groups
_A = fs_config.conv_pos
_A = len(_snake_case )
_A = fs_config.encoder_layers
_A = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_A = model.cfg
_A = fs_config.final_dropout
_A = fs_config.layerdrop
_A = fs_config.activation_dropout
_A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_A = fs_config.attention_dropout
_A = fs_config.dropout_input
_A = fs_config.dropout
_A = fs_config.mask_channel_length
_A = fs_config.mask_channel_prob
_A = fs_config.mask_length
_A = fs_config.mask_prob
_A = '''Wav2Vec2FeatureExtractor'''
_A = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]:
if is_finetuned:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_A = SEWConfig.from_pretrained(_snake_case )
else:
_A = convert_config(model[0] , _snake_case )
_A = model[0].eval()
_A = True if config.feat_extract_norm == '''layer''' else False
_A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
if is_finetuned:
if dict_path:
_A = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.eos_index
_A = len(target_dict.symbols )
_A = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , _snake_case )
_A = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
_A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
_A = SEWForCTC(_snake_case )
else:
_A = SEWModel(_snake_case )
feature_extractor.save_pretrained(_snake_case )
recursively_load_weights(_snake_case , _snake_case , _snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 2 | 0 |
'''simple docstring'''
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f'Expected string as input, found {type(spanish_id).__name__}'
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
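# Hand-checked example: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
# so is_spain_national_id("12345678Z") returns True, while a wrong check
# letter such as "12345678A" returns False.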
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any:
pass
@is_pipeline_test
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@require_torch
def snake_case_ ( self : Tuple ) -> Tuple:
_A = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def snake_case_ ( self : int ) -> Optional[int]:
_A = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
_A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def snake_case_ ( self : Optional[int] ) -> int:
_A = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case_ ( self : Optional[int] ) -> Dict:
_A = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
_A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 2 | 0 |
'''simple docstring'''
import numpy as np
def tanh(vector: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent, computed as 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
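# Quick equivalence check (a sketch): 2 / (1 + e^(-2x)) - 1 is algebraically
# identical to tanh(x), so the result should match NumPy's builtin:
# x = np.array([-1.0, 0.0, 1.0])
# assert np.allclose(tanh(x), np.tanh(x))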
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Tuple ) -> Optional[int]:
_A = tempfile.mkdtemp()
_A = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_A = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_A = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : int ) -> Optional[Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
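    # The fixture above yields one random 30x400 (HxW) RGB image: the raw
    # array is channels-first (3, 30, 400) and np.moveaxis makes it
    # channels-last before the PIL conversion.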
def snake_case_ ( self : Dict ) -> List[str]:
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = self.get_image_processor()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def snake_case_ ( self : List[Any] ) -> List[str]:
_A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def snake_case_ ( self : str ) -> List[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = processor(text=__lowerCAmelCase )
_A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def snake_case_ ( self : Optional[Any] ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__lowerCAmelCase )
_A = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 0 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if item is present in the sorted list a_list."""
    if len(a_list) == 0:
        return False
    midpoint: int = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
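# Expected behaviour (the input list must already be sorted):
# binary_search([1, 3, 5, 7], 5) -> True
# binary_search([1, 3, 5, 7], 4) -> False
# Note: slicing copies the sublists, so this variant uses O(n) extra space;
# tracking low/high indices instead keeps O(log n) time without the copies.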
if __name__ == "__main__":
__UpperCamelCase = input("Enter numbers separated by comma:\n").strip()
__UpperCamelCase = [int(item.strip()) for item in user_input.split(",")]
__UpperCamelCase = int(input("Enter the number to be found in the list:\n").strip())
__UpperCamelCase = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = "openai-gpt"
a__ : Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
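    # attribute_map aliases the library-wide config names to GPT's historical
    # field names, so e.g. config.hidden_size transparently reads and writes
    # n_embd.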
def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]:
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = afn
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_first_dropout
_A = summary_proj_to_labels
super().__init__(**__lowerCAmelCase )
| 2 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
A : Optional[Any] = logging.get_logger(__name__)
# General docstring
A : Optional[int] = '''ResNetConfig'''
# Base docstring
A : str = '''microsoft/resnet-50'''
A : Any = [1, 2_0_4_8, 7, 7]
# Image classification docstring
A : Any = '''microsoft/resnet-50'''
A : List[Any] = '''tiger cat'''
A : Tuple = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class A (nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" ) -> Tuple:
"""simple docstring"""
super().__init__()
A__ = nn.Convad(
__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , bias=__lowerCAmelCase )
A__ = nn.BatchNormad(__lowerCAmelCase )
A__ = ACTaFN[activation] if activation is not None else nn.Identity()
def a_ ( self : Tuple , __lowerCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
A__ = self.convolution(__lowerCAmelCase )
A__ = self.normalization(__lowerCAmelCase )
A__ = self.activation(__lowerCAmelCase )
return hidden_state
class A (nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCAmelCase : ResNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
A__ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
A__ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
A__ = config.num_channels
def a_ ( self : List[str] , __lowerCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
A__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
A__ = self.embedder(__lowerCAmelCase )
A__ = self.pooler(__lowerCAmelCase )
return embedding
class A (nn.Module ):
'''simple docstring'''
def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 ) -> List[str]:
"""simple docstring"""
super().__init__()
A__ = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase )
A__ = nn.BatchNormad(__lowerCAmelCase )
def a_ ( self : List[Any] , __lowerCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
A__ = self.convolution(__lowerCAmelCase )
A__ = self.normalization(__lowerCAmelCase )
return hidden_state
class A (nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" ) -> Dict:
"""simple docstring"""
super().__init__()
A__ = in_channels != out_channels or stride != 1
A__ = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
A__ = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , activation=__lowerCAmelCase ) , )
A__ = ACTaFN[activation]
def a_ ( self : Any , __lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
A__ = hidden_state
A__ = self.layer(__lowerCAmelCase )
A__ = self.shortcut(__lowerCAmelCase )
hidden_state += residual
A__ = self.activation(__lowerCAmelCase )
return hidden_state
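# ResNetBasicLayer above is the classic residual block y = act(F(x) + s(x)):
# the shortcut s is a strided 1x1 convolution only when the spatial size or
# channel count changes, and the identity mapping otherwise.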
class A (nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" , __lowerCAmelCase : int = 4 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
A__ = in_channels != out_channels or stride != 1
A__ = out_channels // reduction
A__ = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
A__ = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
A__ = ACTaFN[activation]
def a_ ( self : Any , __lowerCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
A__ = hidden_state
A__ = self.layer(__lowerCAmelCase )
A__ = self.shortcut(__lowerCAmelCase )
hidden_state += residual
A__ = self.activation(__lowerCAmelCase )
return hidden_state
class A (nn.Module ):
'''simple docstring'''
def __init__( self : str , __lowerCAmelCase : ResNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
super().__init__()
A__ = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
A__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , activation=config.hidden_act ) , *[layer(__lowerCAmelCase , __lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def a_ ( self : Any , __lowerCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
A__ = input
for layer in self.layers:
A__ = layer(__lowerCAmelCase )
return hidden_state
class A (nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __lowerCAmelCase : ResNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
A__ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
A__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase ) )
def a_ ( self : List[Any] , __lowerCAmelCase : Tensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
A__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
A__ = stage_module(__lowerCAmelCase )
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
class A (_A ):
'''simple docstring'''
__lowerCamelCase : Dict = ResNetConfig
__lowerCamelCase : Union[str, Any] = "resnet"
__lowerCamelCase : Dict = "pixel_values"
__lowerCamelCase : Dict = True
def a_ ( self : List[Any] , __lowerCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__lowerCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=False ) -> Any:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ = value
A : Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A : Optional[Any] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _A , )
class A (_A ):
'''simple docstring'''
def __init__( self : str , __lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
super().__init__(__lowerCAmelCase )
A__ = config
A__ = ResNetEmbeddings(__lowerCAmelCase )
A__ = ResNetEncoder(__lowerCAmelCase )
A__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self : List[str] , __lowerCAmelCase : Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.embedder(__lowerCAmelCase )
A__ = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
A__ = encoder_outputs[0]
A__ = self.pooler(__lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' , _A , )
class A (_A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
super().__init__(__lowerCAmelCase )
A__ = config.num_labels
A__ = ResNetModel(__lowerCAmelCase )
# classification head
A__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.LongTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.resnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(__lowerCAmelCase )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ''' , _A , )
class A (_A , _A ):
'''simple docstring'''
def __init__( self : int , __lowerCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(__lowerCAmelCase )
super()._init_backbone(__lowerCAmelCase )
A__ = [config.embedding_size] + config.hidden_sizes
A__ = ResNetEmbeddings(__lowerCAmelCase )
A__ = ResNetEncoder(__lowerCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
def a_ ( self : Dict , __lowerCAmelCase : Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None ) -> BackboneOutput:
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = self.embedder(__lowerCAmelCase )
A__ = self.encoder(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
A__ = outputs.hidden_states
A__ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A__ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__lowerCAmelCase , )
| 176 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def snake_case_ ( self : Optional[int] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict:
if not batched:
_A = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['''shortest_edge'''] * h / w )
_A = self.size['''shortest_edge''']
elif w > h:
_A = self.size['''shortest_edge''']
_A = int(self.size['''shortest_edge'''] * w / h )
else:
_A = self.size['''shortest_edge''']
_A = self.size['''shortest_edge''']
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[int] ) -> Any:
_A = DeformableDetrImageProcessingTester(self )
@property
def snake_case_ ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
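# Added illustration (not part of the original test suite): a self-contained run
# of the processor on a synthetic 640x480 image; with the default shortest-edge
# resize this yields the same (1, 3, 800, 1066) shape checked in the tests above.
if __name__ == "__main__":
    demo_processor = DeformableDetrImageProcessor()
    demo_encoding = demo_processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
    print(demo_encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])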
| 2 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowerCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_: torch.FloatTensor
class _lowerCAmelCase ( _A , _A ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 6_5_5_3_6 , lowerCAmelCase_ = None , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = "fourier" , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase_ = "UNetMidBlock1D" , lowerCAmelCase_ = None , lowerCAmelCase_ = (3_2, 3_2, 6_4) , lowerCAmelCase_ = None , lowerCAmelCase_ = 8 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = False , ) -> Optional[int]:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = sample_size
# time
if time_embedding_type == "fourier":
_SCREAMING_SNAKE_CASE : Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__lowerCAmelCase , log=__lowerCAmelCase , flip_sin_to_cos=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_SCREAMING_SNAKE_CASE : Union[str, Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__lowerCAmelCase , downscale_freq_shift=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0]
if use_timestep_embedding:
_SCREAMING_SNAKE_CASE : int = block_out_channels[0] * 4
_SCREAMING_SNAKE_CASE : Tuple = TimestepEmbedding(
in_channels=__lowerCAmelCase , time_embed_dim=__lowerCAmelCase , act_fn=__lowerCAmelCase , out_dim=block_out_channels[0] , )
_SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
_SCREAMING_SNAKE_CASE : Any = None
_SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList([] )
_SCREAMING_SNAKE_CASE : Optional[int] = None
# down
_SCREAMING_SNAKE_CASE : List[str] = in_channels
for i, down_block_type in enumerate(__lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : str = output_channel
_SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_SCREAMING_SNAKE_CASE : Optional[int] = i == len(__lowerCAmelCase ) - 1
_SCREAMING_SNAKE_CASE : Dict = get_down_block(
__lowerCAmelCase , num_layers=__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__lowerCAmelCase )
# mid
_SCREAMING_SNAKE_CASE : Optional[int] = get_mid_block(
__lowerCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__lowerCAmelCase , add_downsample=__lowerCAmelCase , )
# up
_SCREAMING_SNAKE_CASE : Optional[int] = list(reversed(__lowerCAmelCase ) )
_SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
if out_block_type is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = out_channels
else:
_SCREAMING_SNAKE_CASE : Any = block_out_channels[0]
for i, up_block_type in enumerate(__lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : int = output_channel
_SCREAMING_SNAKE_CASE : str = (
reversed_block_out_channels[i + 1] if i < len(__lowerCAmelCase ) - 1 else final_upsample_channels
)
_SCREAMING_SNAKE_CASE : Optional[Any] = i == len(__lowerCAmelCase ) - 1
_SCREAMING_SNAKE_CASE : Optional[Any] = get_up_block(
__lowerCAmelCase , num_layers=__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : str = output_channel
# out
_SCREAMING_SNAKE_CASE : Optional[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
_SCREAMING_SNAKE_CASE : Optional[int] = get_out_block(
out_block_type=__lowerCAmelCase , num_groups_out=__lowerCAmelCase , embed_dim=block_out_channels[0] , out_channels=__lowerCAmelCase , act_fn=__lowerCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , ) -> Union[UNetaDOutput, Tuple]:
_SCREAMING_SNAKE_CASE : Optional[Any] = timestep
if not torch.is_tensor(__lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(sample.device )
_SCREAMING_SNAKE_CASE : List[str] = self.time_proj(__lowerCAmelCase )
if self.config.use_timestep_embedding:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_mlp(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = timestep_embed[..., None]
_SCREAMING_SNAKE_CASE : List[str] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_SCREAMING_SNAKE_CASE : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_SCREAMING_SNAKE_CASE : List[str] = ()
for downsample_block in self.down_blocks:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = downsample_block(hidden_states=__lowerCAmelCase , temb=__lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_SCREAMING_SNAKE_CASE : Any = self.mid_block(__lowerCAmelCase , __lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_SCREAMING_SNAKE_CASE : Dict = down_block_res_samples[-1:]
_SCREAMING_SNAKE_CASE : int = down_block_res_samples[:-1]
_SCREAMING_SNAKE_CASE : List[str] = upsample_block(__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , temb=__lowerCAmelCase )
# 5. post-process
if self.out_block:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.out_block(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__lowerCAmelCase )
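# Added illustration (not part of the original module); since this file uses
# relative imports it cannot run as a script, so the sketch shows use via the
# installed package:
#
#     from diffusers import UNet1DModel
#     import torch
#
#     model = UNet1DModel(block_out_channels=(32, 32, 64))
#     sample = torch.randn(1, 2, 256)          # (batch, in_channels, length)
#     out = model(sample, timestep=10).sample  # same shape back: (1, 2, 256)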
| 621 |
# Sort a sequence containing only 0, 1, and 2 in a single pass (Dutch National Flag sort).
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in `colors`, in place and in a
    single pass, using three pointers (low / mid / high).

    >>> dutch_national_flag_sort([])
    []
    >>> dutch_national_flag_sort([0])
    [0]
    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # swap the red element into the low region
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # swap the blue element into the high region
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
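    # Added sanity check (illustration, not in the original module): a single-pass
    # DNF sort must agree with Python's built-in sorted() on 0/1/2 sequences.
    import random

    sample = [random.choice(colors) for _ in range(20)]
    assert dutch_national_flag_sort(list(sample)) == sorted(sample)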
| 2 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative
    product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """A modified DDPM scheduler, used in combination with the unCLIP models."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample, timestep=None):
        # no-op: unCLIP does not scale the denoising model input
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        """Set the discrete timesteps used for the diffusion chain (run before inference)."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
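        # Added note (illustration): in DDPM notation this is the posterior variance
        # of q(x_{t-1} | x_t, x_0) from formula (7) of https://arxiv.org/pdf/2006.11239.pdf:
        #     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
        # i.e. exactly beta_prod_t_prev / beta_prod_t * beta as computed above.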
        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True):
        """Predict the sample at the previous timestep by reversing the SDE."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
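        # Added note (illustration): spelled out, this is the posterior mean of
        # formula (7) from https://arxiv.org/pdf/2006.11239.pdf:
        #     mu_t(x_t, x_0) = sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t) * x_0
        #                    + sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * x_t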
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
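# Added illustration (not part of the original module); the file's relative
# imports prevent running it as a script, so the sketch uses the public API:
#
#     import torch
#     from diffusers import UnCLIPScheduler
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     model_output = torch.randn(1, 3, 8, 8)  # stand-in for a UNet prediction
#     prev = scheduler.step(model_output, scheduler.timesteps[0], sample).prev_sample
#     # prev.shape == (1, 3, 8, 8)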
| 250 |
"""Find the `nth` prime number (the 10,001st by default)."""
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to test
    # divisors of that form up to sqrt(number)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
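    # Added illustration (not part of the original solution): the generator can
    # also be consumed directly; the first five primes are 2, 3, 5, 7, 11.
    print(list(itertools.islice(prime_generator(), 5)))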
| 2 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase ( _A, unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = KandinskyVaaControlnetPipeline
lowercase : Any = ["image_embeds", "negative_image_embeds", "hint"]
lowercase : str = ["image_embeds", "negative_image_embeds", "hint"]
lowercase : List[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase : Union[str, Any] = False
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 3_2
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 3_2
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1_0_0
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE =DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCAmelCase , )
_SCREAMING_SNAKE_CASE ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCamelCase_ ( self , _A , _A=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
# create hint
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
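# Added illustration (not part of the original tests): the prior + controlnet
# combination exercised above, condensed. `hint` is a (1, 3, H, W) depth-map
# tensor prepared as in the test; running this downloads fp16 checkpoints and
# needs a CUDA GPU.
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     pipe = KandinskyV22ControlnetPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
#     ).to("cuda")
#     image_emb, zero_emb = prior("A robot, 4k photo", num_inference_steps=5).to_tuple()
#     image = pipe(
#         image_embeds=image_emb, negative_image_embeds=zero_emb, hint=hint,
#         num_inference_steps=50, output_type="np",
#     ).images[0]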
| 255 |
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the
    `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the transformers repo and raise an error if inconsistencies are found."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 2 | 0 |
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
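# Added example (illustration, not in the original script): for a header line such as
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->Block
# `_re_copy_warning` captures the indent, the object path
# ("models.attention.BasicTransformerBlock") and the trailing replace pattern,
# which `_re_replace_pattern` then splits into ("BasicTransformerBlock", "Block", "").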
def get_indent(code):
    """Return the indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 80 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline using an `AutoModelForVision2Seq`: predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
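# Added illustration (not part of the original module): typical use goes through
# the `pipeline` factory; the checkpoint below is one public captioning model.
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#     # -> [{"generated_text": "..."}]  (exact caption depends on the checkpoint)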
| 2 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
def __init__( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Optional[Any]=13 , _snake_case : int=7 , _snake_case : Any=True , _snake_case : Tuple=True , _snake_case : Dict=True , _snake_case : Optional[Any]=True , _snake_case : Tuple=99 , _snake_case : Any=24 , _snake_case : Tuple=2 , _snake_case : str=6 , _snake_case : Optional[Any]=37 , _snake_case : Optional[Any]="gelu" , _snake_case : int=0.1 , _snake_case : Tuple=0.1 , _snake_case : int=512 , _snake_case : List[Any]=16 , _snake_case : Any=2 , _snake_case : List[Any]=0.02 , _snake_case : Optional[Any]=3 , _snake_case : Dict=None , _snake_case : Any=1000 , )->Tuple:
'''simple docstring'''
__lowerCAmelCase : str = parent
__lowerCAmelCase : str = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : List[str] = is_training
__lowerCAmelCase : Union[str, Any] = use_input_mask
__lowerCAmelCase : List[str] = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Any = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : Dict = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Any = type_sequence_label_size
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : List[Any] = num_labels
__lowerCAmelCase : Optional[Any] = scope
__lowerCAmelCase : int = range_bbox
def UpperCAmelCase__ ( self : int )->str:
'''simple docstring'''
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase : Optional[int] = bbox[i, j, 3]
__lowerCAmelCase : Optional[int] = bbox[i, j, 1]
__lowerCAmelCase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase : Tuple = bbox[i, j, 2]
__lowerCAmelCase : int = bbox[i, j, 0]
__lowerCAmelCase : Optional[Any] = t
__lowerCAmelCase : Tuple = None
if self.use_input_mask:
__lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : Tuple = None
if self.use_labels:
__lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : str )->Union[str, Any]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3)) | 504 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
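# Note: this scrapes Yahoo Finance's rendered HTML; the CSS class above is
# site-specific and is likely to break whenever the page markup changes.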
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 76 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
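# Sanity check: the expected MST above has 8 edges (num_nodes - 1) with total weight 37.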
| 2 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 68 |
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
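# Example: sum_of_proper_divisors(28) == 28 (1 + 2 + 4 + 7 + 14), i.e. 28 is a perfect number.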
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags
    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the downloaded model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme) | 8 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
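# A large prime modulus keeps hash collisions rare; candidate matches are still
# verified character-by-character below before the function returns True.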
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
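# Worked example of the rolling-hash update above (illustrative values): with
# alphabet_size = 256, sliding a 2-character window from "ab" to "bc" computes
# hash("bc") = ((hash("ab") - ord("a") * 256) * 256 + ord("c")) % modulus,
# so each shift costs O(1) instead of rehashing the whole window.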
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 2 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
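# Quick sanity check for the recursion above: with scores [3, 5, 2, 9] (height = 2),
# the maximizer gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3.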
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 229 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
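# Example: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}.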
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
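    # Illustrative walk-through (hypothetical merge table): with bpe_ranks containing
    # ("l", "o") and ("lo", "w"), the token "low" first becomes ("l", "o", "w</w>"),
    # then ("lo", "w</w>") -- merging stops once no remaining pair is in bpe_ranks.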
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 2 | 0 |
from __future__ import annotations
def is_palindrome(n) -> bool:
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 26 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
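# Example: for position 4 the parent is (4 - 1) // 2 = 1, and its children sit at 9 and 10.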
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap = []
        self.position_map = {}
        self.elements = 0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections = {}
        self.nodes = 0
def __repr__( self : str ) -> str:
return str(self.connections )
def __len__( self : Dict ) -> int:
return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algorithm(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
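# Minimal usage sketch (hypothetical node labels):
#     graph = GraphUndirectedWeighted[str]()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     dist, parent = prims_algorithm(graph)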
| 2 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
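# e.g. decoder_config_from_checkpoint("medium") yields hidden_size=1536,
# 48 decoder layers and 24 attention heads (ffn_dim = 4 * hidden_size = 6144).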
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 176 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
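    # Example: for token id lists [5, 6] and [7], the pair layout produced above is
    # <s> 5 6 </s> </s> 7 </s>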
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 2 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image: np.ndarray, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
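    # Example: with size={"shortest_edge": 224}, a 480x640 image is resized so its
    # shorter side becomes 224 (the longer side scales to preserve the aspect ratio)
    # before the separate 256x256 center crop is applied.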
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> str:
_SCREAMING_SNAKE_CASE : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy()
_SCREAMING_SNAKE_CASE : Dict = []
for idx in range(len(__lowerCAmelCase ) ):
_SCREAMING_SNAKE_CASE : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = logits.argmax(dim=1 )
_SCREAMING_SNAKE_CASE : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
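
# --- Usage sketch (added; not part of the original file) -----------------------
# Minimal end-to-end driver for the processor above. The checkpoint name
# "apple/deeplabv3-mobilevit-small" is an assumption for illustration; any
# MobileViT segmentation checkpoint whose outputs expose `logits` works the same.
def _demo_mobilevit_segmentation(image_path: str):
    from PIL import Image
    from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
    model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")  # resize -> center crop -> rescale -> RGB->BGR flip
    outputs = model(**inputs)
    # `target_sizes` expects (height, width); PIL's `size` is (width, height).
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]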
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search: return the set of vertices reachable from `start`."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
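
# --- Added for comparison (not in the original file) ---------------------------
# The BFS counterpart referenced by the comments above: identical shape, but a
# FIFO queue (popleft) replaces the LIFO stack, and vertices are marked when
# enqueued rather than when popped.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # pop the *first* element instead of the last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # mark on enqueue so each vertex enters the queue once
                queue.append(adj)
    return explored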
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
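
# --- Added illustration (not in the original file) ------------------------------
# A stripped-down sketch of what `_LazyModule` accomplishes, under the assumption
# that the real implementation is more elaborate: attribute access triggers the
# actual submodule import, so importing the package itself stays cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule_name = self._symbol_to_module[symbol]  # KeyError -> AttributeError in real code
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        return getattr(submodule, symbol)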
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
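
# --- Added usage sketch (not in the original file) ------------------------------
# Quick self-check of the helpers above: the mid-run save/reload must leave the
# learning-rate trace identical to an uninterrupted run.
def _demo_schedule_roundtrip():
    import torch
    from transformers import AdamW, get_linear_schedule_with_warmup

    def make_scheduler():
        optimizer = AdamW([torch.nn.Parameter(torch.zeros(1))], lr=10.0)
        return get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

    lrs_plain = unwrap_schedule(make_scheduler(), num_steps=10)
    lrs_reloaded = unwrap_and_save_reload_schedule(make_scheduler(), num_steps=10)
    assert lrs_plain == lrs_reloaded
    return lrs_plain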
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """A picklable callable that stands in for the lambdas used inside the LR schedulers."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
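
# --- Added worked example (not in the original file) ----------------------------
# Why get_linear_schedule_with_warmup yields [0.0, 5.0, 10.0, 8.75, ...] with
# base lr 10.0, 2 warmup steps and 10 training steps: the multiplier ramps up as
# step/num_warmup_steps during warmup, then decays linearly to 0.
def linear_warmup_multiplier(step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))


assert [10.0 * linear_warmup_multiplier(s, 2, 10) for s in range(4)] == [0.0, 5.0, 10.0, 8.75]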
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
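
# --- Added usage sketch (not in the original file) ------------------------------
# `attribute_map` makes the generic names transparent aliases of the XLNet ones,
# and `d_head` is derived from `d_model // n_head` rather than passed in.
def _demo_xlnet_config():
    config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
    assert config.hidden_size == config.d_model == 1024
    assert config.d_head == config.d_model // config.n_head
    return config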
from __future__ import annotations

import copy
import tempfile
import unittest

from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tensorflow_probability,
    require_tf,
    slow,
)

from ..bert.test_modeling_bert import BertModelTester


if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
def base16_encode(data: bytes) -> str:
    """Turn every byte into its two-digit uppercase hex representation."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
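
# --- Added round-trip check (not in the original file) --------------------------
# base16_decode is the exact inverse of base16_encode for arbitrary bytes.
def _demo_base16_roundtrip():
    payload = b"Hello World!"
    encoded = base16_encode(payload)  # '48656C6C6F20576F726C6421'
    assert base16_decode(encoded) == payload
    return encoded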
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message decrypted under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
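
# --- Added usage sketch (not in the original file) ------------------------------
# Non-interactive demonstration: "KHOOR ZRUOG" is "HELLO WORLD" shifted by 3,
# so the line printed for key #3 recovers the plaintext.
def _demo_caesar_bruteforce():
    decrypt("KHOOR ZRUOG")  # the key #3 line reads "HELLO WORLD"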
def is_arithmetic_series(series: list) -> bool:
    """Return True if `series` has a constant difference between consecutive terms."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the numbers in `series`."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
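
# --- Added usage sketch (not in the original file) ------------------------------
# The two helpers compose naturally: for an arithmetic series, the mean equals
# the average of the first and last terms.
def _demo_arithmetic_series():
    series = [2, 4, 6, 8]
    assert is_arithmetic_series(series)
    assert arithmetic_mean(series) == (series[0] + series[-1]) / 2 == 5.0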
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
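
# --- Added illustration (not in the original file) ------------------------------
# Shape contract of the generators in `get_tfds`: every element is a
# (features, label) pair where `features` maps tokenizer input names to int
# vectors. A tiny self-contained tf.data example with made-up token ids:
def _demo_feature_label_dataset():
    import tensorflow as tf

    examples = [({"input_ids": [101, 7592, 102]}, 0), ({"input_ids": [101, 2088, 102]}, 1)]
    ds = tf.data.Dataset.from_generator(
        lambda: iter(examples),
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )
    return list(ds.as_numpy_iterator())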
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
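
# --- Added usage note (not in the original file) ---------------------------------
# Applying the QFT to the all-zeros state produces a uniform superposition, so
# the 10_000 shots should spread roughly evenly over all 2**n bitstrings.
def _demo_qft_uniform_counts():
    counts = quantum_fourier_transform(3)
    assert set(counts) <= {f"{i:03b}" for i in range(8)}  # 8 possible outcomes
    # Each count should be near 10_000 / 8 = 1250, up to sampling noise.
    return counts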
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them to `<output_dir>/<split>_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
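
# --- Added usage sketch (not in the original file) -------------------------------
# How `handle_metrics` composes with the trainer outputs: given any metrics dict
# it logs the sorted keys and writes `<split>_results.json` next to the
# checkpoints. A tiny offline stand-in (with `save_json` replaced by json.dump):
def _demo_handle_metrics(tmp_dir: str):
    import json
    import os

    metrics = {"val_loss": 1.2345, "val_rouge2": 18.7}
    for key in sorted(metrics):
        print(f"  {key} = {metrics[key]}")
    with open(os.path.join(tmp_dir, "val_results.json"), "w") as f:
        json.dump(metrics, f, indent=4)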
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq SEW weights into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
return x | 8 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
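
# Stand-alone usage sketch (added for illustration, not part of the test suite);
# the checkpoint and the image mirror the ones exercised in the slow tests above.
#
#   from PIL import Image
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   print(classifier(image, candidate_labels=["cat", "plane", "remote"]))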
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
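
# Quick self-checks for replace_key (added for illustration; the keys are made up
# but follow the patterns handled above).
assert replace_key("vqvae.bottleneck.level_blocks.0.k") == "vqvae.bottleneck.level_blocks.0.codebook"
assert replace_key("prime_state_ln.weight") == "encoder.final_layer_norm.weight"
assert replace_key("priors.0.y_emb.lyrics") == "priors.0.metadata_embedding.lyrics"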
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
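
# Example invocation (added for illustration; "convert_jukebox.py" stands for
# whatever name this script is saved under locally):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted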
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
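
# Stand-alone sketch (added for illustration, not part of the test suite): the
# same processor API against the public "kakaobrain/align-base" checkpoint,
# paired with AlignModel. The image URL is an illustrative COCO sample.
#
#   import requests
#   import torch
#   from PIL import Image
#   from transformers import AlignModel, AlignProcessor
#
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   model = AlignModel.from_pretrained("kakaobrain/align-base")
#   image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#   inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt")
#   with torch.no_grad():
#       probs = model(**inputs).logits_per_image.softmax(dim=1)
#   print(probs)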
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
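
# Short sketch (added for illustration, not from the original file): the default
# configuration and the overall downsampling factor of the convolutional feature
# encoder, which the property above computes as the product of the conv strides
# (5 * 2**6 = 320).
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # 320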
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
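
# Short sketch (added for illustration, not from the original file): thanks to
# `attribute_map` above, generic config names resolve to the GPT-specific ones,
# so `hidden_size` reads `n_embd`.
if __name__ == "__main__":
    config = OpenAIGPTConfig()
    print(config.n_embd, config.hidden_size)  # 768 768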
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Any ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def a_ ( self : Dict ) -> Dict:
"""simple docstring"""
A__ = """</s>"""
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(__lowerCAmelCase ) , 11_03 )
def a_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def a_ ( self : Any ) -> int:
"""simple docstring"""
A__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ = (
"""Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
A__ = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
A__ = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
A__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
A__ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
A__ = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
A__ = """To ensure a smooth flow of bank resolutions."""
A__ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
A__ = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a_ ( self : Dict ) -> Any:
"""simple docstring"""
A__ = ["""This is going to be way too long.""" * 1_50, """short example"""]
A__ = ["""not super long but more than 5 tokens""", """tiny"""]
A__ = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
A__ = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def a_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A__ = {"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a_ ( self : Dict ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a_ ( self : Any , **__lowerCAmelCase : Tuple ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def a_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
A__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ = (
"""Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
A__ = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
A__ = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_torch
def a_ ( self : List[Any] ) -> str:
"""simple docstring"""
A__ = ["""This is going to be way too long.""" * 10_00, """short example"""]
A__ = ["""not super long but more than 5 tokens""", """tiny"""]
A__ = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
A__ = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
def a_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
A__ = self._large_tokenizer(__lowerCAmelCase ).input_ids
self.assertListEqual(
__lowerCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
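
# Stand-alone sketch (added for illustration, not part of the tests):
# round-tripping a sentence through the public checkpoint exercised above.
#
#   from transformers import PegasusTokenizer
#
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#   ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
#   print(ids)  # ends with 1, the </s> id
#   print(tok.decode(ids, skip_special_tokens=True))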
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
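
# Helper sketch (added for illustration, not in the original tests): the
# shortest-edge resize rule that get_expected_values encodes, as a stand-alone
# function. The longest-edge cap is deliberately ignored, mirroring the comment
# in the tester's __init__.
def shortest_edge_resize(height, width, shortest_edge=18):
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge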
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify masks
_A = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2 | 0 |
"""simple docstring"""
lowerCAmelCase__ = 256
# Modulus to hash a string
lowerCAmelCase__ = 1_000_003
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(_snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = len(_snake_case )
if p_len > t_len:
return False
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Tuple = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_SCREAMING_SNAKE_CASE : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_SCREAMING_SNAKE_CASE : Dict = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : str = 'abc1abc12'
_SCREAMING_SNAKE_CASE : Any = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_snake_case, _snake_case ) and not rabin_karp(_snake_case, _snake_case )
# Test 2)
_SCREAMING_SNAKE_CASE : str = 'ABABX'
_SCREAMING_SNAKE_CASE : List[str] = 'ABABZABABYABABX'
assert rabin_karp(_snake_case, _snake_case )
# Test 3)
_SCREAMING_SNAKE_CASE : Optional[Any] = 'AAAB'
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'ABAAAAAB'
assert rabin_karp(_snake_case, _snake_case )
# Test 4)
_SCREAMING_SNAKE_CASE : Dict = 'abcdabcy'
_SCREAMING_SNAKE_CASE : Tuple = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_snake_case, _snake_case )
# Test 5)
_SCREAMING_SNAKE_CASE : str = 'Lü'
_SCREAMING_SNAKE_CASE : Dict = 'Lüsai'
assert rabin_karp(_snake_case, _snake_case )
_SCREAMING_SNAKE_CASE : Any = 'Lue'
assert not rabin_karp(_snake_case, _snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
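# A self-contained sketch of just the rolling-hash recurrence used above, so the
# update step can be inspected in isolation. The base and modulus defaults mirror
# the constants at the top of this snippet; the input strings are illustrative.
def _rolling_hash_demo(text: str, window: int, base: int = 256, mod: int = 1_000_003) -> list:
    # Hash of the first window, built the same way as above: h = ord(c) + h * base (mod m).
    h = 0
    for ch in text[:window]:
        h = (ord(ch) + h * base) % mod
    hashes = [h]
    # Precompute base**(window - 1) mod m, used to drop the leading character.
    power = pow(base, window - 1, mod)
    for i in range(len(text) - window):
        # Drop text[i], shift one position, then append text[i + window].
        h = ((h - ord(text[i]) * power) * base + ord(text[i + window])) % mod
        hashes.append(h)
    return hashes

# "abcabc" has four length-3 windows (abc, bca, cab, abc); equal windows hash equally.
assert len(_rolling_hash_demo("abcabc", 3)) == 4
assert _rolling_hash_demo("abcabc", 3)[0] == _rolling_hash_demo("abcabc", 3)[3]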
| 621 |
UpperCAmelCase_ = 0 # The first color of the flag.
UpperCAmelCase_ = 1 # The second color of the flag.
UpperCAmelCase_ = 2 # The third color of the flag.
UpperCAmelCase_ = (red, white, blue)
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list:
if not sequence:
return []
if len(_snake_case ) == 1:
return list(_snake_case )
_A = 0
_A = len(_snake_case ) - 1
_A = 0
while mid <= high:
if sequence[mid] == colors[0]:
_A , _A = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_A , _A = sequence[high], sequence[mid]
high -= 1
else:
            _A = F'''The elements inside the sequence must contain only {colors} values'''
raise ValueError(_snake_case )
return sequence
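# A compact, self-contained restatement of the single-pass three-pointer
# partition above, runnable independently of the renamed function; the input
# values are illustrative. Invariant: everything left of `low` is 0 (red),
# everything in [low, mid) is 1 (white), everything right of `high` is 2 (blue).
def _three_way_partition_demo(seq: list) -> list:
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:  # red: swap into the front region
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:  # white: already in place
            mid += 1
        else:  # blue: swap into the back region
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert _three_way_partition_demo([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]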
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input("""Enter numbers separated by commas:\n""").strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 2 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase__ ( _A ):
"""simple docstring"""
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
lowerCamelCase_ : List[str] = parent
lowerCamelCase_ : int = batch_size
lowerCamelCase_ : Dict = seq_length
lowerCamelCase_ : Optional[Any] = is_training
lowerCamelCase_ : Dict = use_input_mask
lowerCamelCase_ : Optional[int] = use_token_type_ids
lowerCamelCase_ : Dict = use_labels
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Dict = num_hidden_layers
lowerCamelCase_ : Optional[int] = num_attention_heads
lowerCamelCase_ : str = intermediate_size
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : str = hidden_dropout_prob
lowerCamelCase_ : Tuple = attention_probs_dropout_prob
lowerCamelCase_ : Any = max_position_embeddings
lowerCamelCase_ : Any = type_vocab_size
lowerCamelCase_ : Tuple = type_sequence_label_size
lowerCamelCase_ : List[str] = initializer_range
lowerCamelCase_ : Any = num_labels
lowerCamelCase_ : str = num_choices
lowerCamelCase_ : Optional[int] = scope
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : int = None
if self.use_input_mask:
lowerCamelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : str = None
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : Any = None
if self.use_labels:
lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : str = DistilBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : Dict = model(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase_ : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : List[Any] = DistilBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : Tuple = DistilBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : Optional[Any] = self.num_labels
lowerCamelCase_ : List[str] = DistilBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : Union[str, Any] = self.num_labels
lowerCamelCase_ : Tuple = DistilBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCamelCase_ : Dict = self.num_choices
lowerCamelCase_ : Union[str, Any] = DistilBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Union[str, Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = self.prepare_config_and_inputs()
((lowerCamelCase_) ,(lowerCamelCase_) ,(lowerCamelCase_) ,(lowerCamelCase_) ,(lowerCamelCase_) ,(lowerCamelCase_)) : Union[str, Any] = config_and_inputs
lowerCamelCase_ : Any = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( _A, _A, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase : Dict = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : int = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Dict = True
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = DistilBertModelTester(self )
lowerCamelCase_ : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , dim=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__lowerCAmelCase )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCAmelCase )
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCAmelCase )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCAmelCase )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCAmelCase )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCAmelCase )
@slow
def _UpperCamelCase ( self ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Dict = DistilBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
@require_torch_gpu
def _UpperCamelCase ( self ):
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCamelCase_ : str = True
lowerCamelCase_ : Optional[int] = model_class(config=__lowerCAmelCase )
lowerCamelCase_ : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase_ : List[Any] = torch.jit.trace(
__lowerCAmelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , "traced_model.pt" ) )
lowerCamelCase_ : int = torch.jit.load(os.path.join(__lowerCAmelCase , "traced_model.pt" ) , map_location=__lowerCAmelCase )
loaded(inputs_dict["input_ids"].to(__lowerCAmelCase ) , inputs_dict["attention_mask"].to(__lowerCAmelCase ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
lowerCamelCase_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase_ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase_ : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
lowerCamelCase_ : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
lowerCamelCase_ : Optional[int] = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) )
| 250 |
import itertools
import math
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
_A = 2
while True:
if is_prime(_snake_case ):
yield num
num += 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , _snake_case ) )
if __name__ == "__main__":
print(f'{solution() = }')
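# A self-contained sanity check of the 6k +/- 1 idea relied on above: every
# prime greater than 3 is congruent to 1 or 5 modulo 6, so trial division only
# needs candidates 5, 7, 11, 13, ... (stepping by 6) up to sqrt(n).
def _is_prime_6k(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert [p for p in range(30) if _is_prime_6k(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]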
| 2 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _lowerCAmelCase(a : Any , a : Dict ) -> Any:
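    # Pairwise squared Euclidean distances via the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2.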
_SCREAMING_SNAKE_CASE =b.T
_SCREAMING_SNAKE_CASE =np.sum(np.square(_snake_case ) , axis=1 )
_SCREAMING_SNAKE_CASE =np.sum(np.square(_snake_case ) , axis=0 )
_SCREAMING_SNAKE_CASE =np.matmul(_snake_case , _snake_case )
_SCREAMING_SNAKE_CASE =aa[:, None] - 2 * ab + ba[None, :]
return d
def _lowerCAmelCase(a : Optional[Any] , a : Optional[Any] ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE =x.reshape(-1 , 3 )
_SCREAMING_SNAKE_CASE =squared_euclidean_distance(_snake_case , _snake_case )
return np.argmin(_snake_case , axis=1 )
class __UpperCAmelCase ( _A ):
'''simple docstring'''
lowercase : Union[str, Any] = ["pixel_values"]
def __init__( self , _A = None , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = True , **_A , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
_SCREAMING_SNAKE_CASE =get_size_dict(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =np.array(__lowerCAmelCase ) if clusters is not None else None
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =resample
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =do_color_quantize
def UpperCamelCase_ ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
__lowerCAmelCase , size=(size['''height'''], size['''width''']) , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def UpperCamelCase_ ( self , _A , _A = None , ):
'''simple docstring'''
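        # Map pixel values from [0, 255] to [-1, 1]: scale by 1/127.5, then subtract 1.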
_SCREAMING_SNAKE_CASE =rescale(image=__lowerCAmelCase , scale=1 / 127.5 , data_format=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =image - 1
return image
def UpperCamelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE =size if size is not None else self.size
_SCREAMING_SNAKE_CASE =get_size_dict(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_SCREAMING_SNAKE_CASE =clusters if clusters is not None else self.clusters
_SCREAMING_SNAKE_CASE =np.array(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE =[to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE =[self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE =[self.normalize(image=__lowerCAmelCase ) for image in images]
if do_color_quantize:
_SCREAMING_SNAKE_CASE =[to_channel_dimension_format(__lowerCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_SCREAMING_SNAKE_CASE =np.array(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =color_quantize(__lowerCAmelCase , __lowerCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_SCREAMING_SNAKE_CASE =images.shape[0]
_SCREAMING_SNAKE_CASE =images.reshape(__lowerCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_SCREAMING_SNAKE_CASE =list(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE =[to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
_SCREAMING_SNAKE_CASE ={'''input_ids''': images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
| 255 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
if _re_test_backend.search(_snake_case ) is None:
return None
_A = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_A = f.readlines()
_A = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
_A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
_A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_A = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_A = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
_A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
_A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any:
def find_duplicates(_snake_case :Any ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def SCREAMING_SNAKE_CASE_ ( ) -> int:
_A = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
_A = os.path.join(_snake_case , '''__init__.py''' )
_A = parse_init(_snake_case )
if objects is not None:
_A = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
_A = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
_A = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
_A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
UpperCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_A = direct_transformers_import(_snake_case )
_A = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to find all additions and
    # (potentially re-)add them.
with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
_A = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) )
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_snake_case ) > 0:
_A = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 2 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__lowerCAmelCase )
@slow
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.resolver.convert_models(["""heb-eng"""] )
@slow
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=__lowerCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 80 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_A)
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]:
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int:
_A = {}
_A = {}
if prompt is not None:
_A = prompt
if generate_kwargs is not None:
_A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
_A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int:
_A = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
_A = self.model.config.model_type
if model_type == "git":
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
_A = [self.tokenizer.cls_token_id] + input_ids
_A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
_A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_A = None
return model_inputs
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
_A = None
if generate_kwargs is None:
_A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_A = model_inputs.pop(self.model.main_input_name )
_A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
return model_outputs
def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = []
for output_ids in model_outputs:
_A = {
'''generated_text''': self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
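# A minimal usage sketch for this pipeline via the public `transformers`
# entry point. The checkpoint name and image path are illustrative assumptions;
# any image-to-text model from the Hub works. Kept as a comment because this
# module uses relative imports and is not meant to be executed directly.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("path/or/url/to/image.png")
#   # -> [{'generated_text': '...'}]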
| 2 | 0 |
import os
import pytest
from attr import dataclass
_UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class snake_case_ :
A_ = 42
A_ = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
A_ = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
A_ = {**hyperparameters, "max_steps": 1000}
@property
def UpperCAmelCase__ ( self : Optional[Any] )->str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
        return F'''{self.framework}-transformers-test'''
@property
def UpperCAmelCase__ ( self : Dict )->str:
'''simple docstring'''
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def UpperCAmelCase__ ( self : str )->str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
    __lowerCAmelCase : List[str] = SageMakerTestEnvironment(framework=request.cls.framework )
    setattr(request.cls , '''env''' , __lowerCAmelCase )
| 504 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
_A = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_A = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
_A = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
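# Note: scraping by CSS class is brittle. If Yahoo renames the class above,
# soup.find('div', class_=class_) returns None and the chained .find('span')
# raises AttributeError, so callers may want to guard against a None result.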
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=6.0 , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_="fp4" , UpperCamelCase_=False , **UpperCamelCase_ , ) -> List[Any]:
__lowercase : str = load_in_abit
__lowercase : str = load_in_abit
__lowercase : Any = llm_inta_threshold
__lowercase : Tuple = llm_inta_skip_modules
__lowercase : Tuple = llm_inta_enable_fpaa_cpu_offload
__lowercase : Any = llm_inta_has_fpaa_weight
__lowercase : List[Any] = bnb_abit_quant_type
__lowercase : Tuple = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__lowercase : Tuple = torch.floataa
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowercase : Tuple = getattr(__lowerCAmelCase , __lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.dtype ):
__lowercase : Any = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def _lowerCamelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , __lowerCAmelCase ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __lowerCAmelCase ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __lowerCAmelCase ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , __lowerCAmelCase ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , __lowerCAmelCase ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , __lowerCAmelCase ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def _lowerCamelCase ( self ) -> str:
return self.load_in_abit or self.load_in_abit
def _lowerCamelCase ( self ) -> Tuple:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]:
__lowercase : Optional[Any] = cls(**__lowerCAmelCase )
__lowercase : List[str] = []
for key, value in kwargs.items():
if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
to_remove.append(__lowerCAmelCase )
for key in to_remove:
kwargs.pop(__lowerCAmelCase , __lowerCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]:
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
__lowercase : int = self.to_dict()
__lowercase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + '''\n'''
writer.write(__lowerCAmelCase )
def _lowerCamelCase ( self ) -> Dict[str, Any]:
__lowercase : List[Any] = copy.deepcopy(self.__dict__ )
__lowercase : Optional[int] = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self ) -> List[str]:
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def _lowerCamelCase ( self , UpperCamelCase_ = True ) -> str:
if use_diff is True:
__lowercase : str = self.to_diff_dict()
else:
__lowercase : str = self.to_dict()
return json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
def _lowerCamelCase ( self ) -> Dict[str, Any]:
__lowercase : Dict = self.to_dict()
# get the default config dict
__lowercase : int = BitsAndBytesConfig().to_dict()
__lowercase : List[Any] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__lowercase : Optional[int] = value
return serializable_config_dict
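# A usage sketch against the public API this class corresponds to
# (`transformers.BitsAndBytesConfig`); the keyword names below are the public
# ones, written as an assumption since the identifiers in this snippet are renamed.
#
#   import torch
#   from transformers import BitsAndBytesConfig
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#       bnb_4bit_use_double_quant=True,
#   )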
| 76 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_A = 9
_A = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_A = kruskal(_snake_case , _snake_case )
_A = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
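    # Sanity check: 8 edges connect all 9 vertices, and the total MST weight is 1+2+2+4+4+7+8+9 = 37.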
assert sorted(_snake_case ) == sorted(_snake_case )
| 2 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowercase__ ( A_: str , A_: Union[str, Any]=None ) -> Any:
"""simple docstring"""
require_version(deps[pkg] , _snake_case )
| 68 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
if not isinstance(_snake_case , _snake_case ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
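# A self-contained illustration of the proper-divisor sum computed above:
# a number equal to the sum of its proper divisors is called perfect.
def _proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)

assert _proper_divisor_sum(6) == 6    # 1 + 2 + 3
assert _proper_divisor_sum(28) == 28  # 1 + 2 + 4 + 7 + 14
assert _proper_divisor_sum(12) == 16  # 1 + 2 + 3 + 4 + 6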
| 2 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : Optional[int] = 16
lowercase__ : Optional[int] = 32
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int = 16 ) -> Optional[Any]:
__A : Dict = AutoTokenizer.from_pretrained('bert-base-cased' )
__A : List[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(__snake_case : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
__A : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_snake_case , max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__A : List[str] = datasets.map(
_snake_case , batched=_snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__A : Union[str, Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__A : Tuple = 16
elif accelerator.mixed_precision != "no":
__A : Optional[int] = 8
else:
__A : List[Any] = None
return tokenizer.pad(
_snake_case , padding='longest' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
__A : Any = DataLoader(
tokenized_datasets['train'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
__A : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : Optional[Any] = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : str ) -> Optional[int]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _snake_case ) == "1":
__A : Dict = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__A : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__A : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Any = config['lr']
__A : Dict = int(config['num_epochs'] )
__A : Optional[int] = int(config['seed'] )
__A : List[Any] = int(config['batch_size'] )
set_seed(_snake_case )
__A ,__A : List[str] = get_dataloaders(_snake_case , _snake_case )
__A : Optional[Any] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__A : str = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A : Dict = batch_size // MAX_GPU_BATCH_SIZE
__A : Optional[Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
__A : List[str] = AdamW(params=model.parameters() , lr=_snake_case )
# Instantiate scheduler
__A : str = get_linear_schedule_with_warmup(
optimizer=_snake_case , num_warmup_steps=1_00 , num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A ,__A ,__A ,__A ,__A : Dict = accelerator.prepare(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__A : str = os.path.split(_snake_case )[-1].split('.' )[0]
accelerator.init_trackers(_snake_case , _snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__A : int = 0
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A : Tuple = model(**_snake_case )
__A : Tuple = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__A : str = loss / gradient_accumulation_steps
accelerator.backward(_snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__A : List[Any] = model(**_snake_case )
__A : str = outputs.logits.argmax(dim=-1 )
__A ,__A : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_snake_case , references=_snake_case , )
__A : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , _snake_case )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(_snake_case ),
'epoch': epoch,
} , step=_snake_case , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase ( ) -> List[Any]:
__A : int = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=_snake_case , default=_snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between no, fp16, bf16 (bfloat16), and fp8. Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=_snake_case , default='logs' , help='Location where experiment tracking logs and relevant project information are stored' , )
__A : Union[str, Any] = parser.parse_args()
__A : Union[str, Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_snake_case , _snake_case )
if __name__ == "__main__":
main() | 8 |
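The training loop above scales the loss by `gradient_accumulation_steps` and only steps the optimizer every few batches. A minimal runnable sketch of that pattern follows; the linear model, batch shapes, and step count are synthetic placeholders, not taken from the script:

# Minimal sketch of the manual gradient-accumulation pattern used above.
# Model, data, and step count are synthetic placeholders.
import torch
from torch import nn

model = nn.Linear(10, 2)
optimizer = torch.optim.AdamW(params=model.parameters(), lr=2e-5)
gradient_accumulation_steps = 4

for step in range(16):  # stand-in for enumerate(train_dataloader)
    batch = torch.randn(8, 10)
    labels = torch.randint(0, 2, (8,))
    loss = nn.functional.cross_entropy(model(batch), labels)
    # Scale so the accumulated gradient averages over the micro-batches.
    (loss / gradient_accumulation_steps).backward()
    if step % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()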
UpperCAmelCase_ = 2_5_6
# Modulus to hash a string
UpperCAmelCase_ = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str ) -> bool:
_A = len(_snake_case )
_A = len(_snake_case )
if p_len > t_len:
return False
_A = 0
_A = 0
_A = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def SCREAMING_SNAKE_CASE_ ( ) -> None:
_A = '''abc1abc12'''
_A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
_A = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
# Test 2)
_A = '''ABABX'''
_A = '''ABABZABABYABABX'''
assert rabin_karp(_snake_case , _snake_case )
# Test 3)
_A = '''AAAB'''
_A = '''ABAAAAAB'''
assert rabin_karp(_snake_case , _snake_case )
# Test 4)
_A = '''abcdabcy'''
_A = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(_snake_case , _snake_case )
# Test 5)
_A = '''Lü'''
_A = '''Lüsai'''
assert rabin_karp(_snake_case , _snake_case )
_A = '''Lue'''
assert not rabin_karp(_snake_case , _snake_case )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 2 | 0 |
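For reference, a de-obfuscated sketch of the Rabin-Karp matcher above, using the same alphabet size and modulus; the final update inside the scan loop is the rolling hash the snippet's comment links to:

ALPHABET_SIZE = 256
MODULUS = 1000003  # prime modulus used for hashing, as in the snippet

def rabin_karp_ref(pattern: str, text: str) -> bool:
    p_len, t_len = len(pattern), len(text)
    if p_len > t_len:
        return False
    p_hash = text_hash = 0
    modulus_power = 1  # becomes ALPHABET_SIZE ** (p_len - 1) % MODULUS
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * ALPHABET_SIZE) % MODULUS
        text_hash = (ord(text[i]) + text_hash * ALPHABET_SIZE) % MODULUS
        if i != p_len - 1:
            modulus_power = (modulus_power * ALPHABET_SIZE) % MODULUS
    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i != t_len - p_len:
            # Rolling hash: drop the leading char, shift, append the next char.
            text_hash = (
                (text_hash - ord(text[i]) * modulus_power) * ALPHABET_SIZE
                + ord(text[i + p_len])
            ) % MODULUS
    return False

assert rabin_karp_ref("abc1abc12", "alskfjaldsabc1abc1abc12k23adsfabcabc")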
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    """simple docstring"""
    # ctypes layout mirroring the Win32 CONSOLE_CURSOR_INFO structure
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor():
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide():
    """simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 229 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Dict = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
_A = do_lower_case
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(__lowerCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_A = {}
@property
def snake_case_ ( self : List[str] ) -> int:
return len(self.decoder )
def snake_case_ ( self : Dict ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__lowerCAmelCase ):
try:
_A = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__lowerCAmelCase )
_A = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_A = get_pairs(__lowerCAmelCase )
_A = ''' '''.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
_A = word.replace(__lowerCAmelCase , '''''' )
_A = word.replace(''' ''' , __lowerCAmelCase )
_A = word
return word
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
                ''' Make sure to provide a `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
_A = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
_A = ''' '''.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
_A = ''''''.join(string.split(__lowerCAmelCase ) )
return string
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_A = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 2 | 0 |
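The tokenizer's `bpe` method above applies learned merges greedily, always merging the adjacent pair with the lowest rank until no ranked pair remains. A compact sketch of that core loop with a toy merge table (the three merges are invented for illustration; `</w>` is the end-of-word marker the snippet appends):

# Toy BPE application: repeatedly merge the lowest-ranked adjacent pair.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
bpe_ranks = {pair: rank for rank, pair in enumerate(merges)}

def apply_bpe(token: str) -> str:
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        ranked = [p for p in pairs if p in bpe_ranks]
        if not ranked:
            break
        first, second = min(ranked, key=bpe_ranks.get)
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

print(apply_bpe("lower"))  # 'low er</w>'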
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
__UpperCamelCase = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
__UpperCamelCase = {
"jukebox": 512,
}
class _A ( _A ):
lowercase__: Any = VOCAB_FILES_NAMES
lowercase__: str = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Any = PRETRAINED_LYRIC_TOKENS_SIZES
lowercase__: str = ["input_ids", "attention_mask"]
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : int=["v3", "v2", "v2"] , __magic_name__ : str=5_12 , __magic_name__ : Tuple=5 , __magic_name__ : str="<|endoftext|>" , **__magic_name__ : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else unk_token
super().__init__(
unk_token=__lowerCAmelCase , n_genres=__lowerCAmelCase , version=__lowerCAmelCase , max_n_lyric_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
__snake_case : str = version
__snake_case : Union[str, Any] = max_n_lyric_tokens
__snake_case : Union[str, Any] = n_genres
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__snake_case : Dict = json.load(__lowerCAmelCase )
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__snake_case : int = json.load(__lowerCAmelCase )
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__snake_case : Any = json.load(__lowerCAmelCase )
__snake_case : Any = r"""[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
__snake_case : List[Any] = oov.replace(r"""\-\'""" , r"""\-+\'""" )
__snake_case : Any = regex.compile(__lowerCAmelCase )
__snake_case : Union[str, Any] = {v: k for k, v in self.artists_encoder.items()}
__snake_case : Tuple = {v: k for k, v in self.genres_encoder.items()}
__snake_case : Dict = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : Tuple = [self.artists_encoder.get(__lowerCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(__lowerCAmelCase ) ):
__snake_case : Union[str, Any] = [self.genres_encoder.get(__lowerCAmelCase , 0 ) for genre in list_genres[genres]]
__snake_case : Optional[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__snake_case : Tuple = [[self.lyrics_encoder.get(__lowerCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowercase__ ( self : int , __magic_name__ : Tuple ) -> Optional[int]:
"""simple docstring"""
return list(__lowerCAmelCase )
def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , **__magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : Tuple = self.prepare_for_tokenization(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case : List[Any] = self._tokenize(__lowerCAmelCase )
return artist, genre, lyrics
def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : str , __magic_name__ : str , __magic_name__ : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__snake_case : int = artists[idx].lower()
__snake_case : Dict = [genres[idx].lower()]
else:
__snake_case : Any = self._normalize(artists[idx] ) + """.v2"""
__snake_case : Dict = [
self._normalize(__lowerCAmelCase ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__snake_case : Any = regex.compile(r"""[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+""" )
__snake_case : Optional[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n"""
__snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(__lowerCAmelCase ) )}
__snake_case : str = 0
__snake_case : int = len(__lowerCAmelCase ) + 1
__snake_case : Tuple = self.vocab
__snake_case : List[str] = {v: k for k, v in self.vocab.items()}
__snake_case : List[str] = """"""
else:
__snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+""" )
__snake_case : Union[str, Any] = self._run_strip_accents(__lowerCAmelCase )
__snake_case : Optional[Any] = lyrics.replace("""\\""" , """\n""" )
__snake_case : List[Any] = self.out_of_vocab.sub("""""" , __lowerCAmelCase ), [], []
return artists, genres, lyrics
def lowercase__ ( self : str , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = unicodedata.normalize("""NFD""" , __lowerCAmelCase )
__snake_case : int = []
for char in text:
__snake_case : str = unicodedata.category(__lowerCAmelCase )
if cat == "Mn":
continue
output.append(__lowerCAmelCase )
return "".join(__lowerCAmelCase )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : List[Any] = (
[chr(__lowerCAmelCase ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(__lowerCAmelCase ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(__lowerCAmelCase ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
__snake_case : Optional[int] = frozenset(__lowerCAmelCase )
__snake_case : Optional[Any] = re.compile(r"""_+""" )
__snake_case : Optional[int] = """""".join([c if c in accepted else """_""" for c in text.lower()] )
__snake_case : Dict = pattern.sub("""_""" , __lowerCAmelCase ).strip("""_""" )
return text
def lowercase__ ( self : Any , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
return " ".join(__lowerCAmelCase )
def lowercase__ ( self : List[Any] , __magic_name__ : Any , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : bool = False ) -> Any:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case : Optional[int] = TensorType(__lowerCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
__snake_case : Dict = tf.constant
__snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
__snake_case : Any = torch.tensor
__snake_case : int = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
__snake_case : int = jnp.array
__snake_case : Dict = _is_jax
else:
__snake_case : int = np.asarray
__snake_case : Optional[Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__snake_case : Tuple = [inputs]
if not is_tensor(__lowerCAmelCase ):
__snake_case : List[Any] = as_tensor(__lowerCAmelCase )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.""" )
return inputs
def __call__( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : List[str]="" , __magic_name__ : Any="pt" ) -> BatchEncoding:
"""simple docstring"""
__snake_case : int = [0, 0, 0]
__snake_case : Optional[Any] = [artist] * len(self.version )
__snake_case : Optional[int] = [genres] * len(self.version )
__snake_case , __snake_case , __snake_case : str = self.tokenize(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case , __snake_case , __snake_case : Any = self._convert_token_to_id(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case : Optional[Any] = [-INFINITY] * len(full_tokens[-1] )
__snake_case : Any = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def lowercase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case : str = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCAmelCase ) )
__snake_case : Tuple = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCAmelCase ) )
__snake_case : Optional[int] = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def lowercase__ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.artists_decoder.get(__lowerCAmelCase )
__snake_case : Optional[Any] = [self.genres_decoder.get(__lowerCAmelCase ) for genre in genres_index]
__snake_case : Any = [self.lyrics_decoder.get(__lowerCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 26 |
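The `_run_strip_accents` and `_normalize` helpers above canonicalize artist and genre names: NFD-decompose and drop combining marks, then map anything outside lowercase ASCII letters, digits, and '.' to underscores. A standalone sketch of the same two steps:

import re
import unicodedata

def strip_accents(text: str) -> str:
    # NFD splits accented chars into base char + combining mark (category "Mn").
    return "".join(
        c for c in unicodedata.normalize("NFD", text) if unicodedata.category(c) != "Mn"
    )

def normalize_name(text: str) -> str:
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    mapped = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", mapped).strip("_")

print(normalize_name(strip_accents("Céline Dion")))  # celine_dion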
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 2
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
_A = []
_A = {}
_A = 0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def snake_case_ ( self : str ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_A = self.elements
self.elements += 1
self._bubble_up(__lowerCAmelCase )
def snake_case_ ( self : Tuple ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_A , _A = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_A , _A = self.heap[0]
self._bubble_down(__lowerCAmelCase )
return elem
def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Update the weight of the given key
_A = self.position_map[elem]
_A = (elem, weight)
if position > 0:
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_A = self.position_map[elem]
if curr_pos == 0:
return None
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[curr_pos]
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_up(__lowerCAmelCase )
return None
def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_A = self.position_map[elem]
_A , _A = self.heap[curr_pos]
_A = get_child_left_position(__lowerCAmelCase )
_A = get_child_right_position(__lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
_A , _A = self.heap[child_left_position]
_A , _A = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
if child_left_position < self.elements:
_A , _A = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
_A , _A = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
return None
def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
# Swap the nodes at the given positions
_A = self.heap[nodea_pos][0]
_A = self.heap[nodea_pos][0]
_A , _A = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_A = nodea_pos
_A = nodea_pos
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : str ) -> None:
_A = {}
_A = 0
def __repr__( self : str ) -> str:
return str(self.connections )
def __len__( self : Dict ) -> int:
return self.nodes
def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_A = {}
self.nodes += 1
def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
_A = weight
_A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
_A = {node: maxsize for node in graph.connections}
_A = {node: None for node in graph.connections}
_A = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_snake_case , _snake_case )
if priority_queue.is_empty():
return dist, parent
# initialization
_A = priority_queue.extract_min()
_A = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
# running prim's algorithm
while not priority_queue.is_empty():
_A = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
return dist, parent
| 2 | 0 |
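The `MinPriorityQueue` above supports `update_key` so Prim's algorithm can decrease weights in place. A shorter sketch of the same minimum-spanning-tree construction using `heapq` with lazy deletion instead of decrease-key (the three-node graph is illustrative):

import heapq

def prim_mst(graph: dict, start) -> dict:
    # graph: node -> {neighbour: weight}; returns the MST as a parent map.
    parent = {start: None}
    visited = set()
    heap = [(0, start, None)]
    while heap:
        weight, node, par = heapq.heappop(heap)
        if node in visited:
            continue  # lazy deletion: skip stale heap entries
        visited.add(node)
        parent[node] = par
        for nb, w in graph[node].items():
            if nb not in visited:
                heapq.heappush(heap, (w, nb, node))
    return parent

g = {"a": {"b": 3, "c": 1}, "b": {"a": 3, "c": 2}, "c": {"a": 1, "b": 2}}
print(prim_mst(g, "a"))  # {'a': None, 'c': 'a', 'b': 'c'}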
def solution ( n :int = 1_0_0 ) -> int:
    """simple docstring"""
    collect_powers = set()
    max_limit = n + 1  # maximum limit: a and b both range over 2..n inclusive
    for a in range(2 , max_limit ):
        for b in range(2 , max_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )  # e.g. solution(5) == 15 distinct terms
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 176 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = vocab_file
_A = monolingual_vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_A = {}
_A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = cnt
cnt += 1
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_A = line.strip().split()[0]
_A = len(self.fairseq_tokens_to_ids )
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = len(self.fairseq_tokens_to_ids )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> List[Any]:
_A = self.__dict__.copy()
_A = None
_A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
_A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 2 | 0 |
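`build_inputs_with_special_tokens` above follows the RoBERTa/BART convention: `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair. A tiny sketch with toy ids (0 and 2 stand in for `<s>` and `</s>`; real ids come from the vocab):

CLS, SEP = 0, 2  # toy ids for <s> and </s>

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

print(with_special_tokens([5, 6]))       # [0, 5, 6, 2]
print(with_special_tokens([5, 6], [7]))  # [0, 5, 6, 2, 2, 7, 2]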
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False, False, False
@dataclass
class _lowerCAmelCase :
SCREAMING_SNAKE_CASE_: Optional[int] = None
SCREAMING_SNAKE_CASE_: bool = True
SCREAMING_SNAKE_CASE_: bool = True
SCREAMING_SNAKE_CASE_: Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE_: ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE_: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
SCREAMING_SNAKE_CASE_: str = field(default='Audio' , init=_A , repr=_A )
def __call__( self ) -> List[str]:
return self.pa_type
def A ( self , lowerCAmelCase_ ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_SCREAMING_SNAKE_CASE : str = BytesIO()
sf.write(__lowerCAmelCase , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    _SCREAMING_SNAKE_CASE : Any = np.frombuffer(value['bytes'] , dtype=np.int16 ).astype(np.float32 ) / 3_2_7_6_7
                else:
                    _SCREAMING_SNAKE_CASE : Dict = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.float32 ) / 3_2_7_6_7
_SCREAMING_SNAKE_CASE : List[str] = BytesIO(bytes() )
sf.write(__lowerCAmelCase , __lowerCAmelCase , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> dict:
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
_SCREAMING_SNAKE_CASE : Union[str, Any] = xsplitext(__lowerCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
_SCREAMING_SNAKE_CASE : Any = token_per_repo_id or {}
_SCREAMING_SNAKE_CASE : Dict = path.split('::' )[-1]
try:
_SCREAMING_SNAKE_CASE : List[str] = string_to_dict(__lowerCAmelCase , config.HUB_DATASETS_URL )['repo_id']
_SCREAMING_SNAKE_CASE : Tuple = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_SCREAMING_SNAKE_CASE : List[str] = None
with xopen(__lowerCAmelCase , 'rb' , use_auth_token=__lowerCAmelCase ) as f:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = sf.read(__lowerCAmelCase )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = sf.read(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = array.T
if self.mono:
_SCREAMING_SNAKE_CASE : int = librosa.to_mono(__lowerCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_SCREAMING_SNAKE_CASE : Tuple = librosa.resample(__lowerCAmelCase , orig_sr=__lowerCAmelCase , target_sr=self.sampling_rate )
_SCREAMING_SNAKE_CASE : Tuple = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def A ( self , lowerCAmelCase_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_SCREAMING_SNAKE_CASE : Any = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
_SCREAMING_SNAKE_CASE : str = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_SCREAMING_SNAKE_CASE : int = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
_SCREAMING_SNAKE_CASE : Tuple = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
_SCREAMING_SNAKE_CASE : Any = pa.array([Audio().encode_example(__lowerCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = storage.field('bytes' )
else:
_SCREAMING_SNAKE_CASE : str = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_SCREAMING_SNAKE_CASE : str = storage.field('path' )
else:
_SCREAMING_SNAKE_CASE : int = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
_SCREAMING_SNAKE_CASE : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
def A ( self , lowerCAmelCase_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase_ ):
with xopen(__lowerCAmelCase , 'rb' ) as f:
_SCREAMING_SNAKE_CASE : Any = f.read()
return bytes_
_SCREAMING_SNAKE_CASE : Optional[Any] = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_SCREAMING_SNAKE_CASE : List[str] = pa.array(
[os.path.basename(__lowerCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
_SCREAMING_SNAKE_CASE : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
| 621 |
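`encode_example` above decodes raw 16-bit PCM by reinterpreting the bytes as int16 and dividing by 32767. A minimal sketch of that conversion on two synthetic samples:

import numpy as np

# Two full-scale int16 samples, serialized as raw PCM bytes.
pcm_bytes = np.array([32767, -32768], dtype=np.int16).tobytes()

# Same conversion as the feature: reinterpret as int16, scale to roughly [-1, 1].
samples = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
print(samples)  # [ 1.        -1.0000305]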
from __future__ import annotations
def depth_first_search ( graph :dict , start :str ) -> set[str]:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 2 | 0 |
import math
__magic_name__ = 1_0
__magic_name__ = 7
__magic_name__ = BALLS_PER_COLOUR * NUM_COLOURS
def solution ( num_picked = 20):
    '''simple docstring'''
    # expected distinct colours = NUM_COLOURS * P(a given colour appears in the draw)
    total = math.comb(NUM_BALLS , num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(2_0))
| 250 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def UpperCamelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(__lowerCAmelCase , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =processor(text=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 255 |
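The test's `setUp` builds its tokenizer fixture by writing a tiny WordPiece vocab to a temp directory and loading from it. A stripped-down sketch of that fixture pattern (assumes `transformers` is installed; the vocab entries are illustrative):

import os
import tempfile

from transformers import BertTokenizer  # assumed installed

# Write a tiny WordPiece vocab, then load a tokenizer from it.
tmpdir = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low", "##er"]
vocab_file = os.path.join(tmpdir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write("".join(t + "\n" for t in vocab_tokens))

tokenizer = BertTokenizer(vocab_file)
print(tokenizer.tokenize("lower"))  # ['low', '##er']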
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Any = "xlnet"
a__ : Dict = ["mems"]
a__ : List[str] = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = n_layer
_A = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A = d_model // n_head
_A = ff_activation
_A = d_inner
_A = untie_r
_A = attn_type
_A = initializer_range
_A = layer_norm_eps
_A = dropout
_A = mem_len
_A = reuse_len
_A = bi_data
_A = clamp_len
_A = same_length
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_last_dropout
_A = start_n_top
_A = end_n_top
_A = bos_token_id
_A = pad_token_id
_A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , __lowerCAmelCase , )
_A = kwargs['''use_cache''']
_A = use_mems_eval
_A = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 2 | 0 |
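The config above rejects any `d_model` that does not divide evenly across `n_head` and derives the per-head width as `d_model // n_head`. A quick numeric check with the defaults shown (1024 and 16):

# Per-head width check, as enforced in the config's __init__ above.
d_model, n_head = 1024, 16
assert d_model % n_head == 0, "d_model must divide evenly across attention heads"
d_head = d_model // n_head
print(d_head)  # 64: the per-head dimension used by the attention layers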