from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for Open-Llama models."""

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg is popped for backward compatibility with older configs
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
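# Illustrative usage (a sketch, not part of the original file): constructing the
# config with a `rope_scaling` dict exercises `_rope_scaling_validation` above,
# while e.g. {"type": "linear", "factor": 0.5} would raise a ValueError.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})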
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity of two collections: the size of their
    intersection divided by the size of their union. With
    ``alternative_union=True`` the denominator is ``len(set_a) + len(set_b)``
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # union as a list, preserving the order of elements from set_a
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
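    # The sets above share {"c", "d", "e"} (3 elements) out of a union of 8
    # elements, so the call prints 3 / 8 = 0.375.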
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    # `image_file` is a shared fixture provided by the test suite's conftest
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator, in lowest terms, of the product of the digit-cancelling fractions."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
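    # The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65
    # and 49/98; their product reduces to 1/100, so this prints 100.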
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb


logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """
    Quantize a model with bitsandbytes according to `bnb_quantization_config`, loading the weights
    from `weights_location` when the model was instantiated on the meta device.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """
    Replace the `nn.Linear` layers of a model with bitsandbytes quantized layers.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model"""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized yet, quantize it and offload the quantized weights together with the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # walk down to the submodule that actually holds the tensor
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
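# Illustrative usage sketch (not part of the original file; the model class and
# checkpoint path below are placeholders):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     with init_empty_weights():
#         empty_model = MyModel()  # hypothetical nn.Module built on the meta device
#     model = load_and_quantize_model(
#         empty_model,
#         bnb_config,
#         weights_location="/path/to/checkpoint",  # placeholder path
#         device_map="auto",
#     )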
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """
    Check whether the module was compiled with torch.compile()
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """
    Extract a model from its distributed containers.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """
    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
    """
    PartialState().wait_for_everyone()


def save(obj, f):
    """
    Save the data to disk. Use in place of `torch.save()`.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that adds each keyword argument (upper-cased) to `os.environ`
    and removes it again when exiting.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """
    Gets a pretty name from `obj`.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """
    Recursively merges two dictionaries.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """
    Checks if a port is in use on `localhost`.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
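# Example for `patch_environment` (illustrative, not part of the original file):
#
#     with patch_environment(master_port="29501"):
#         assert os.environ["MASTER_PORT"] == "29501"
#     # on exit the variable is removed from os.environ again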
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Processor wrapping a ViT image processor and the char/bpe/wp tokenizers used by MGP-STR."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
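# Illustrative decoding flow (a sketch, not part of the original file; `model` and
# the checkpoint name are assumptions): MGP-STR models return a tuple of
# (char, bpe, wp) logits, which is the `sequences` argument `batch_decode` expects.
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     text = processor.batch_decode(outputs.logits)["generated_text"]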
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
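# For example, data files named "*.jsonl" resolve through
# _EXTENSION_TO_MODULE[".jsonl"] -> ("json", {}), so the packaged `json` builder
# (with its cached source hash from _PACKAGED_DATASETS_MODULES) is used to load them.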
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : int = 0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(lowercase_) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(lowercase_) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE_ : List[str] = ''''''
for ch in content:
ans += chr(ord(lowercase_) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE_ : List[Any] = ''''''
for ch in content:
ans += chr(ord(lowercase_) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
try:
with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowercase_ , lowercase_))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
try:
with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowercase_ , lowercase_))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
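# With this setup, importing e.g. `UniSpeechModel` from this module only triggers
# the heavy `modeling_unispeech` import on first attribute access, while type
# checkers still see the eager imports in the TYPE_CHECKING branch above.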
| 121 | """simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 150 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
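# Odd-even transposition sort: every list element lives in its own process and,
# over n rounds, alternately compare-swaps with its right (even rounds) and
# left (odd rounds) neighbor, so each value migrates to its sorted position.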
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 321 | """simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
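# The page's three "maincounter-number" divs hold total cases, deaths and
# recoveries, in that order, matching the namedtuple's field order.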
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 321 | 1 |
"""simple docstring"""
def solution(limit = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 347 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace formula: c = sqrt(bulk_modulus / density)
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
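# Example with approximate values for water: density ~998 kg/m^3 and bulk
# modulus ~2.15e9 Pa give speed_of_sound_in_a_fluid(998, 2.15e9) ~= 1468 m/s.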
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self ):
        '''simple docstring'''
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def test_call_pil(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_numpy(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_pytorch(self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("""Gain (dB)""")
    plt.plot(fft_db)
    plt.show()
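# Example usage with a hypothetical filter object that satisfies the
# FilterType protocol (i.e. exposes process(sample) -> float):
#   show_frequency_response(my_filter, samplerate=48000)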
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("""Phase shift (Radians)""")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 136 | 0 |
'''simple docstring'''
import string
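# Brute-force attack on the Caesar cipher: there are only 26 possible shift
# keys, so we simply print the decryption under every key and let the reader
# spot the meaningful plaintext.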
def decrypt(message: str) -> None:
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
def main() -> None:
    '''simple docstring'''
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 97 | '''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
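# Each [parlai_name, hf_name] pair rewrites one fragment of a ParlAI weight
# name into its Hugging Face equivalent when the state dict is converted below.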
def rename_state_dict_key(k):
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
def rename_layernorm_keys(sd):
    '''simple docstring'''
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    '''simple docstring'''
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 229 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8 ) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl: str , i: int ) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str , i: int ) -> str:
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
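# Example: random(ascii_letters, 4) might return a string such as "kQzD".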
def random_number(chars_incl , i ):
    pass  # Put your code here...
def random_letters(chars_incl , i ):
    pass  # Put your code here...
def random_characters(chars_incl , i ):
    pass  # Put your code here...
def is_strong_password(password: str , min_length: int = 8 ) -> bool:
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''' , password_generator(length ) )
    print(
        '''Alternative Password generated:''' , alternative_password_generator(chars_incl , length ) , )
    print('''[If you are thinking of using this password, You better save it.]''' )
if __name__ == "__main__":
main()
| 367 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
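# This script converts an original Apple ml-cvnets MobileViT checkpoint into
# the 🤗 Transformers format: build a matching config, rename every state-dict
# key, then sanity-check the converted model's logits on a COCO test image.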
def get_mobilevit_config(mobilevit_name ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(name , base_model=False ):
    for i in range(1 , 6 ):
        if F'''layer_{i}.''' in name:
            name = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F'''.{i}.{j}.''' in name:
                name = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F'''.{i}.{j}.''' in name:
                name = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
                if "expand_1x1" in name:
                    name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
                if "conv_3x3" in name:
                    name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
                if "reduce_1x1" in name:
                    name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    for i in range(2 , 5 ):
        if F'''.global_rep.{i}.weight''' in name:
            name = name.replace(F'''.global_rep.{i}.weight''' , '''.layernorm.weight''' )
        if F'''.global_rep.{i}.bias''' in name:
            name = name.replace(F'''.global_rep.{i}.bias''' , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
def convert_state_dict(orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 54 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector):
    '''simple docstring'''
    return np.maximum(0 , vector )
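# ReLU is applied element-wise: negative inputs map to 0 and the rest pass
# through unchanged, e.g. relu(np.array([-3.0, 0.0, 2.5])) -> [0., 0., 2.5].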
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 101 |
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
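# NOTE: taking the largest denomination first is only guaranteed to be optimal
# for "canonical" coin systems such as the Indian denominations used below;
# arbitrary denominations would require a dynamic-programming solution instead.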
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
        n = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 32 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
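# lru_cache memoizes every computed factorial(n), so each value is calculated
# at most once across repeated calls, e.g. factorial(10) == 3628800.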
if __name__ == "__main__":
import doctest
doctest.testmod() | 130 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def A ( self : List[str] ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=True ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=True ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def A ( self : Optional[int] ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('roberta-base' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A ( self : int ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def A ( self : str ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets )
def A ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , ) | 130 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
"""simple docstring"""
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module ):
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
    def test_memory_implicit(self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
    def test_memory_explicit(self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function("""hello""" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, """hello"""] )
    def test_start_zero(self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
    def test_approach_zero(self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
    def test_verbose_guard(self ):
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , """hello""" , """world""" )
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
    def test_any_other_error(self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("""Oops, we had an error!""" )

        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
    @require_cuda
    def test_release_memory(self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 38 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
    test_gradient_checkpointing = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Union[str, Any] ):
return 8
@property
def _A ( self : int ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : str ):
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase :str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : str ):
UpperCamelCase :int = self.dummy_prior
UpperCamelCase :Any = self.dummy_image_encoder
UpperCamelCase :Dict = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :int = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ):
UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :int = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Dict = output.images[0]
UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : List[Any] ):
UpperCamelCase :str = torch_device == """cpu"""
UpperCamelCase :int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : List[Any] ):
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = 1
UpperCamelCase :int = 2
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :str = batch_size * [inputs[key]]
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self ):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 38 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
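# An a x b grid contains T(a) * T(b) axis-aligned sub-rectangles, where
# T(n) = n * (n + 1) / 2 is the n-th triangle number; the search below looks
# for the grid whose rectangle count is closest to the two-million target
# (Project Euler problem 85) and returns its area a * b.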
def solution(target = 2_000_000 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 354 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def A_ ( A__ ) -> Dict:
if isinstance(A__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
"""simple docstring"""
def __lowercase ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> Dict:
'''simple docstring'''
pass
def __lowercase ( self) -> Dict:
'''simple docstring'''
pass
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]:
'''simple docstring'''
a__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase)
a__ : Any = TFVisionTextDualEncoderModel(lowercase)
a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(lowercase , lowercase)
a__ : List[str] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
a__ , a__ : Any = self.get_vision_text_model(lowercase , lowercase)
a__ : Tuple = {'vision_model': vision_model, 'text_model': text_model}
a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase)
a__ : Any = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : int = self.get_vision_text_model(lowercase , lowercase)
a__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Optional[Any] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
a__ : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase)
a__ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
a__ : List[str] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
a__ : str = after_output[0].numpy()
a__ : str = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase , 1e-5)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.get_vision_text_model(lowercase , lowercase)
a__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Optional[int] = model(
input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase)
a__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(lowercase) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[Any] = to_atuple(vision_model.config.image_size)
a__ : Dict = to_atuple(vision_model.config.patch_size)
a__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
a__ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : str = np.abs((a - b)).max()
self.assertLessEqual(lowercase , lowercase , F'Difference between torch and flax is {diff} (>= {tol}).')
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.prepare_config_and_inputs()
self.check_save_load(**lowercase)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
a__ : Optional[int] = model_a(**lowercase)
a__ : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase)
a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
a__ : int = model_a(**lowercase)
a__ : str = after_outputs[0].numpy()
a__ : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase , 1e-5)
@require_tf
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert')
a__ : str = 13
a__ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
a__ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
a__ : Optional[int] = random_attention_mask([batch_size, 4])
a__ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowercase ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = TFViTModel(lowercase , name='vision_model')
a__ : Tuple = TFBertModel(lowercase , name='text_model')
return vision_model, text_model
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Tuple = TFViTModelTester(self)
a__ : int = TFBertModelTester(self)
a__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
a__ : Any = bert_model_tester.prepare_config_and_inputs()
a__ , a__ , a__ : Optional[int] = vision_config_and_inputs
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta')
a__ : Union[str, Any] = 13
a__ : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
a__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
a__ : Any = random_attention_mask([batch_size, 4])
a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(lowercase , lowercase)
a__ : Any = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : int = model(
input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase)
a__ : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(lowercase) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a__ : Optional[int] = to_atuple(vision_model.config.image_size)
a__ : str = to_atuple(vision_model.config.patch_size)
a__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : List[str] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
a__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = TFDeiTModel(lowercase , name='vision_model')
a__ : Optional[int] = TFRobertaModel(lowercase , name='text_model')
return vision_model, text_model
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = TFDeiTModelTester(self)
a__ : str = TFRobertaModelTester(self)
a__ : str = vit_model_tester.prepare_config_and_inputs()
a__ : Dict = bert_model_tester.prepare_config_and_inputs()
a__ , a__ , a__ : Any = vision_config_and_inputs
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Tuple = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert')
a__ : Optional[int] = 13
a__ : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
a__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
a__ : str = random_attention_mask([batch_size, 4])
a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowercase ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : str = TFCLIPVisionModel(lowercase , name='vision_model')
a__ : str = TFBertModel(lowercase , name='text_model')
return vision_model, text_model
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = TFCLIPVisionModelTester(self)
a__ : Dict = TFBertModelTester(self)
a__ : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
a__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
a__ , a__ : Union[str, Any] = vision_config_and_inputs
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Optional[int] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=lowercase)
a__ : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
a__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a__ : Optional[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowercase , padding=lowercase , return_tensors='np')
a__ : int = model(**lowercase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a__ : List[str] = np.array([[1.2_28_47_27, 0.3_10_41_22]])
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowercase , atol=1e-3))
| 225 | 0 |
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
'''simple docstring'''
try:
__lowerCAmelCase = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(_UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
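# Hedged reference sketch (not the original; names are assumed): the same
# trial-division logic with distinct variable names, returning the last --
# i.e. largest -- prime factor found.
def _largest_prime_factor(n: int = 600_851_475_143) -> int:
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    if n == 2:
        return 2
    i, ans = 2, 0
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n //= i
        i += 1
    return ans

assert _largest_prime_factor(13_195) == 29  # 13195 = 5 * 7 * 13 * 29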
| 57 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] =parent
UpperCAmelCase : Optional[int] =batch_size
UpperCAmelCase : List[Any] =seq_length
UpperCAmelCase : Optional[int] =is_training
UpperCAmelCase : Union[str, Any] =use_input_mask
UpperCAmelCase : Tuple =use_labels
UpperCAmelCase : Union[str, Any] =vocab_size
UpperCAmelCase : Tuple =hidden_size
UpperCAmelCase : Dict =projection_dim
UpperCAmelCase : Optional[int] =num_hidden_layers
UpperCAmelCase : Dict =num_attention_heads
UpperCAmelCase : int =intermediate_size
UpperCAmelCase : Any =dropout
UpperCAmelCase : Union[str, Any] =attention_dropout
UpperCAmelCase : Union[str, Any] =max_position_embeddings
UpperCAmelCase : List[str] =initializer_range
UpperCAmelCase : str =scope
UpperCAmelCase : str =bos_token_id
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : int =None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase : Optional[int] =input_mask.numpy()
UpperCAmelCase , UpperCAmelCase : List[Any] =input_mask.shape
UpperCAmelCase : Optional[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : List[Any] =1
UpperCAmelCase : Tuple =0
UpperCAmelCase : List[Any] =self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case__ )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ )
UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ )
UpperCAmelCase : str =model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs
UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
__lowerCamelCase : Dict = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Dict = False
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str =BlipTextModelTester(self )
UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__=True ) -> Any:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
| 348 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCAmelCase_ ( snake_case_ ):
lowerCamelCase : str = '''data2vec-text'''
def __init__( self : int , UpperCAmelCase__ : Optional[Any]=3_0_5_2_2 , UpperCAmelCase__ : List[str]=7_6_8 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : Optional[Any]=1_2 , UpperCAmelCase__ : Optional[int]=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : int=1E-12 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Tuple , ) -> Optional[int]:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( snake_case_ ):
@property
def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 353 |
'''simple docstring'''
def a_ ( ):
lowerCAmelCase = []
lowerCAmelCase = 1
while len(lowerCamelCase ) < 1e6:
constant.append(str(lowerCamelCase ) )
i += 1
lowerCAmelCase = ''.join(lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
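# Hedged alternative sketch (names assumed): the k-th Champernowne digit can
# be located arithmetically, without building a million-character string.
def _champernowne_digit(k: int) -> int:
    digits = 1  # width of the numbers in the current block (1, 2, 3, ...)
    count = 9  # how many numbers the current block contains (9, 90, 900, ...)
    start = 1  # first number of the current block (1, 10, 100, ...)
    while k > digits * count:
        k -= digits * count
        start += count
        digits += 1
        count *= 10
    number = start + (k - 1) // digits
    return int(str(number)[(k - 1) % digits])

assert _champernowne_digit(12) == 1  # 0.123456789101112... -> the 12th digit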
| 55 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self) ->Union[str, Any]:
torch.manual_seed(0)
a_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.dummy_uncond_unet
a_ = KarrasVeScheduler()
a_ = KarrasVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = torch.manual_seed(0)
a_ = pipe(num_inference_steps=2 , generator=__UpperCAmelCase , output_type="numpy").images
a_ = torch.manual_seed(0)
a_ = pipe(num_inference_steps=2 , generator=__UpperCAmelCase , output_type="numpy" , return_dict=__UpperCAmelCase)[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = "google/ncsnpp-celebahq-256"
a_ = UNetaDModel.from_pretrained(__UpperCAmelCase)
a_ = KarrasVeScheduler()
a_ = KarrasVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = torch.manual_seed(0)
a_ = pipe(num_inference_steps=20 , generator=__UpperCAmelCase , output_type="numpy").images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
a_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 243 |
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(UpperCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 243 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowercase : int = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
try:
with open(UpperCAmelCase__ , """rb""" ) as flax_state_f:
lowercase_ : Union[str, Any] = from_bytes(UpperCAmelCase__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(UpperCAmelCase__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] ) -> Dict:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowercase_ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCAmelCase__ : x.dtype == jnp.bfloataa , UpperCAmelCase__ ) ).values()
if any(UpperCAmelCase__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowercase_ : List[str] = jax.tree_util.tree_map(
lambda UpperCAmelCase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCAmelCase__ )
lowercase_ : str = """"""
lowercase_ : Dict = flatten_dict(UpperCAmelCase__ , sep=""".""" )
lowercase_ : Tuple = pt_model.state_dict()
# keep track of unexpected & missing keys
lowercase_ : Tuple = []
lowercase_ : str = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase_ : Dict = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowercase_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
lowercase_ : Tuple = jnp.transpose(UpperCAmelCase__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowercase_ : List[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
lowercase_ : List[str] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowercase_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(UpperCAmelCase__ ):
lowercase_ : Dict = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowercase_ : Tuple = """.""".join(UpperCAmelCase__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowercase_ : Any = np.asarray(UpperCAmelCase__ ) if not isinstance(UpperCAmelCase__ , np.ndarray ) else flax_tensor
lowercase_ : Optional[Any] = torch.from_numpy(UpperCAmelCase__ )
# remove from missing keys
missing_keys.remove(UpperCAmelCase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCAmelCase__ )
pt_model.load_state_dict(UpperCAmelCase__ )
# re-transform missing_keys to list
lowercase_ : str = list(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
return pt_model
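# Minimal sketch of the layout conventions handled above (shapes are
# illustrative): Flax stores a conv kernel as (H, W, C_in, C_out) where
# PyTorch expects (C_out, C_in, H, W), and a dense kernel as (in, out)
# where PyTorch's Linear weight is (out, in).
_flax_conv_kernel = np.zeros((3, 3, 16, 32))  # (H, W, in, out)
assert np.transpose(_flax_conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
_flax_dense_kernel = np.zeros((768, 3072))  # (in, out)
assert _flax_dense_kernel.T.shape == (3072, 768)  # PyTorch Linear layout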
| 371 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# the custom attribute set before saving survives the save/load round-trip
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
| 21 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[int] = KandinskyVaaPriorPipeline
_UpperCAmelCase : str = ["prompt"]
_UpperCAmelCase : int = ["prompt", "negative_prompt"]
_UpperCAmelCase : List[Any] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
_UpperCAmelCase : Tuple = False
@property
def __lowerCamelCase ( self : Optional[Any] ) ->Any:
return 3_2
@property
def __lowerCamelCase ( self : List[str] ) ->List[str]:
return 3_2
@property
def __lowerCamelCase ( self : str ) ->Dict:
return self.time_input_dim
@property
def __lowerCamelCase ( self : Any ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self : Union[str, Any] ) ->str:
return 1_0_0
@property
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCamelCase ( self : Union[str, Any] ) ->str:
torch.manual_seed(0 )
lowerCamelCase__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(A )
@property
def __lowerCamelCase ( self : int ) ->List[str]:
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_2,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowerCamelCase__ : Optional[int] = PriorTransformer(**A )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it does not
lowerCamelCase__ : Dict = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowerCamelCase ( self : Tuple ) ->Tuple:
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowerCamelCase__ : Union[str, Any] = CLIPVisionModelWithProjection(A )
return model
@property
def __lowerCamelCase ( self : Dict ) ->List[Any]:
lowerCamelCase__ : Tuple = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
lowerCamelCase__ : str = self.dummy_prior
lowerCamelCase__ : int = self.dummy_image_encoder
lowerCamelCase__ : List[Any] = self.dummy_text_encoder
lowerCamelCase__ : Tuple = self.dummy_tokenizer
lowerCamelCase__ : Optional[int] = self.dummy_image_processor
lowerCamelCase__ : Dict = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=A , clip_sample_range=10.0 , )
lowerCamelCase__ : Optional[Any] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __lowerCamelCase ( self : Union[str, Any] , A : Any , A : Any=0 ) ->int:
if str(A ).startswith('''mps''' ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(A )
else:
lowerCamelCase__ : Any = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase__ : Any = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __lowerCamelCase ( self : Any ) ->Dict:
lowerCamelCase__ : int = '''cpu'''
lowerCamelCase__ : Any = self.get_dummy_components()
lowerCamelCase__ : str = self.pipeline_class(**A )
lowerCamelCase__ : Dict = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : Union[str, Any] = pipe(**self.get_dummy_inputs(A ) )
lowerCamelCase__ : Optional[Any] = output.image_embeds
lowerCamelCase__ : Any = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
lowerCamelCase__ : Tuple = image[0, -1_0:]
lowerCamelCase__ : Any = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowerCamelCase__ : Any = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCamelCase ( self : Optional[int] ) ->List[Any]:
lowerCamelCase__ : Dict = torch_device == '''cpu'''
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = False
self._test_inference_batch_single_identical(
test_max_difference=A , relax_max_difference=A , test_mean_pixel_difference=A , )
@skip_mps
def __lowerCamelCase ( self : int ) ->List[str]:
lowerCamelCase__ : Optional[int] = torch_device == '''cpu'''
lowerCamelCase__ : Any = False
self._test_attention_slicing_forward_pass(
test_max_difference=A , test_mean_pixel_difference=A , )
| 142 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCamelCase ( *A : Dict , **A : Optional[int] ) ->Dict:
pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self : Any , A : List[str] , A : Tuple , A : List[str] ) ->List[Any]:
lowerCamelCase__ : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : Union[str, Any] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __lowerCamelCase ( self : List[Any] , A : Optional[int] , A : Tuple ) ->Optional[Any]:
lowerCamelCase__ : str = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase__ : Union[str, Any] = len(A )
self.assertGreater(A , 0 )
self.assertEqual(
A , [
{
'''score''': ANY(A ),
'''label''': ANY(A ),
'''box''': {'''xmin''': ANY(A ), '''ymin''': ANY(A ), '''xmax''': ANY(A ), '''ymax''': ANY(A )},
}
for i in range(A )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : Dict ) ->List[Any]:
pass
@require_torch
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : Optional[int] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : List[Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCamelCase__ : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
lowerCamelCase__ : Tuple = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCamelCase__ : List[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
pass
@require_torch
@slow
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase__ : Optional[Any] = 0.2
lowerCamelCase__ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 142 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297 |
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , ):
'''simple docstring'''
lowercase_ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
lowercase_ = 1 - (matter_density + radiation_density + dark_energy)
lowercase_ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowercase_ = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
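# Hedged sanity check (illustrative values; assumes the function above is the
# `hubble_parameter` used in the demo call): at redshift 0, with the relative
# densities summing to one, E(0) = 1 and the result equals H0 itself.
import math
_h0 = 68.3
_hz = hubble_parameter(
    hubble_constant=_h0,
    radiation_density=1E-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1E-4,
    redshift=0,
)
assert math.isclose(_hz, _h0, rel_tol=1E-9)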
| 297 | 1 |
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
SCREAMING_SNAKE_CASE_: int =str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE_: Any =str(bin(lowerCamelCase_ ) )[2:]
SCREAMING_SNAKE_CASE_: List[Any] =max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
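# Hedged reference check (names assumed): the zfill-based string construction
# above should agree with Python's built-in bitwise OR on non-negative ints.
def _or_via_strings(a: int, b: int) -> str:
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int("1" in pair)) for pair in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert _or_via_strings(25, 32) == bin(25 | 32)  # both give '0b111001'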
| 173 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series: n/2 * (2a + (n - 1) * d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
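    # Quick check (assumed usage): 1 + 2 + ... + 10 = 55.
    assert sum_of_series(1, 1, 10) == 55.0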
| 21 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` objects and `TYPE_CHECKING` objects."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one is broken."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
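
    # A minimal illustration (assumed example, not part of the original checks):
    # `find_backend` pairs an `if not is_xxx_available()` guard with its backend name.
    assert find_backend("    if not is_torch_available():") == "torch"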
| 366 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Utility class that combines CLIP image and text embeddings into inputs usable by the unCLIP decoder."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
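

if __name__ == "__main__":
    # Minimal smoke test (assumed dimensions, not part of the original file):
    # 4 extra context tokens are prepended to the 77 text tokens.
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=32
    )
    hidden, time_emb = proj(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 77, 32),
        do_classifier_free_guidance=False,
    )
    assert hidden.shape == (2, 77 + 4, 32) and time_emb.shape == (2, 64)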
| 48 |
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
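

if __name__ == "__main__":
    # Example (assumed usage; the ordering of the two molar-mass arguments is an assumption):
    # by Graham's law, hydrogen (~2.016 g/mol) effuses ~3.98x faster than oxygen (~32.0 g/mol).
    print(effusion_ratio(2.016, 32.0))  # ~3.9841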
| 183 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 362 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, wrapped as a module."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, as used by PoolFormer."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input makes this a "pooling minus identity" token mixer.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the PoolFormer block: pooling mixer + MLP, each with a residual connection."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    # Name assumed: nothing in this file references the class; it is a simple dense head.
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
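

if __name__ == "__main__":
    # Minimal smoke test (assumed usage, not part of the original file):
    # drop_path must be the identity whenever drop_prob == 0.0, even in training mode.
    x = torch.randn(2, 3, 4, 4)
    assert torch.equal(drop_path(x, drop_prob=0.0, training=True), x)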
| 221 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
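

if __name__ == "__main__":
    # Minimal smoke test (assumed usage, not part of the original file):
    # the defaults above mirror the conv-bert-base release.
    config = ConvBertConfig()
    print(config.hidden_size, config.conv_kernel_size)  # 768 9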
| 32 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 55 |
'''simple docstring'''
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
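    # Example (assumed usage): the mean of [1, 2, 3, 4] is 2.5; each point deviates by 1.5 or 0.5.
    assert average_absolute_deviation([1, 2, 3, 4]) == 1.0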
| 55 | 1 |
def actual_power(a: int, b: int) -> int:
    """Compute a**b recursively via exponentiation by squaring, for b >= 0."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent, delegating negative exponents to 1 / a**(-b)."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
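    # More checks (assumed usage): exponentiation by squaring agrees with the ** operator.
    assert power(2, 10) == 2**10
    assert power(2, -3) == 0.125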
| 19 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case , snake_case , snake_case : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
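# Hypothetical invocation sketch (file name, paths, and column id are illustrative,
# not taken from the original script):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-cased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --output_dir ./out --do_train --do_eval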
| 59 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowercase : Union[str, Any] ="0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values below the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
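# A quick shape sketch for the two helpers above (values are illustrative; assumes
# numpy/flax are installed):
#   ids_tensor((2, 5), vocab_size=10) -> int32 array of shape (2, 5), entries in [0, 10)
#   random_attention_mask((2, 5))     -> 0/1 mask of shape (2, 5) whose last column is all ones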
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
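# Note: at runtime the module above never imports sentencepiece eagerly; `_LazyModule`
# resolves `BartphoTokenizer` only on first attribute access, e.g. (hypothetical usage):
#   from transformers.models.bartpho import BartphoTokenizer  # triggers the lazy import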
| 266 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
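# Minimal illustration of the greedy longest-match-first loop above (hypothetical vocab):
#   WordpieceTokenizer(vocab={"un", "aff", "able", "affable"}).tokenize("unaffable")
# scans for the longest substring in the vocab at each position, yielding ["un", "affable"].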
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 32 |
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
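# Illustration (added): the sort above is a DFS post-order, so for every edge u -> v,
# u is appended after v. A small validity check (hypothetical helper) usable on the
# demo result below:
def _is_valid_topological_order(order, graph):
    position = {vertex: i for i, vertex in enumerate(order)}
    return all(position[u] > position[v] for u in graph for v in graph[u])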
if __name__ == "__main__":
UpperCamelCase_ = topological_sort('a', [], [])
print(sort) | 243 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
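# Worked example for the extraction above (illustrative path; a valid hash is a
# 40-character hex string, which is what REGEX_COMMIT_HASH enforces):
#   extract_commit_hash("models--org--repo/snapshots/" + "a" * 40 + "/config.json")
#   -> "a" * 40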
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
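# Examples (derived directly from the function above):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")         -> "diffusion_pytorch_model.bin"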
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
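# Worked examples for the escape-time iteration above (derived from the code, not added behavior):
#   get_distance(0, 0, 50) == 1.0   # the origin never escapes, so the loop runs to completion
#   get_distance(1, 1, 50) == 0.0   # (1, 1) escapes on the very first step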
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 283 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed with up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
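# A brute-force cross-check of the counting logic (added for illustration): enumerate every
# lamina directly — outer width o, hole width h with o - h positive and even, and tile count
# o**2 - h**2 within the limit — and compare against the closed-form loop above.
def _brute_force_count(limit: int) -> int:
    count = 0
    outer_width = 3
    while 4 * outer_width - 4 <= limit:  # thinnest possible lamina for this outer width
        hole_width = outer_width - 2
        while hole_width >= 1 and outer_width**2 - hole_width**2 <= limit:
            count += 1
            hole_width -= 2
        outer_width += 1
    return count


assert _brute_force_count(1000) == solution(1000)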
if __name__ == "__main__":
print(f'{solution() = }')
| 184 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 49 | 0 |
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first `nth_term` terms of the P-Series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
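# Example (matches the string format produced above):
#   p_series(5, 2) -> ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]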
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 361 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 139 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
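# Note (added for clarity): with the gradient-accumulation scheme above, the
# effective batch size stays at the user-requested value even when each forward
# pass is capped at MAX_GPU_BATCH_SIZE, i.e.
#
#     effective_batch_size = batch_size * gradient_accumulation_steps
#
# which is also why the scheduler's `num_training_steps` is divided by
# `gradient_accumulation_steps`.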
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 21 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
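# A minimal usage sketch (hypothetical subclass, added for illustration). In
# CLIs built on this pattern, `parser` is typically the sub-parsers action
# returned by `ArgumentParser.add_subparsers()`:
#
#     class EnvCommand(BaseCommand):
#         @staticmethod
#         def register_subcommand(parser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info goes here")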
| 285 | 0 |
"""simple docstring"""
g = 9.80665
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
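    # NOTE: docstring and doctest examples added for illustration; SI units assumed.
    """
    Archimedes' principle: the buoyant force on a submerged object equals
    fluid_density * gravity * volume.

    >>> round(archimedes_principle(fluid_density=500, volume=4, gravity=9.8), 1)
    19600.0
    >>> round(archimedes_principle(fluid_density=997, volume=0.5), 2)
    4888.62
    """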
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 2 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 2 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
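# Usage sketch (added for illustration; mirrors the attribute_map above):
#
#     config = PegasusConfig(d_model=512, encoder_attention_heads=8)
#     config.hidden_size           # -> 512, resolved via the `hidden_size` property
#     config.num_attention_heads   # -> 8, resolved via `num_attention_heads`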
| 24 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
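# Usage sketch (added for illustration; "Intel/dpt-large" is one public depth
# checkpoint — any model in MODEL_FOR_DEPTH_ESTIMATION_MAPPING works):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")   # PIL image built in `postprocess` above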
| 24 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_name = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_name] = expert_weights[idx]
                print(f"{key} -> {new_name}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
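# Example invocation (added for illustration; paths and checkpoint names are
# placeholders):
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/switch-base-8 \
#       --pytorch_dump_folder_path ./switch-base-8-pt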
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 350 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for UniDiffuser: a GPT-2 language model conditioned on a
    prefix of (optionally projected) CLIP embeddings.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions given text embedding features."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
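# Note on the scoring in `generate_beam` (added for clarity): candidate beams
# are ranked by length-normalized log-probability, i.e.
#
#     score(beam) = sum(log p(token_t)) / len(beam)
#
# which is what the `scores_sum / seq_lengths` divisions above implement, so
# longer captions are not penalized merely for containing more tokens.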
| 66 | 0 |
def surface_area_dodecahedron(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume_dodecahedron(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
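# Quick numeric check (added for illustration; values rounded, derived from the
# formulas above for edge = 5):
#
#     surface_area_dodecahedron(5)  # ~ 516.1432
#     volume_dodecahedron(5)        # ~ 957.8898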
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the null samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
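# Usage note (added for clarity): set PROCESS_TRAIN=true in the environment to
# preprocess the training split; anything else preprocesses the validation
# split. Output is written as JSON lines, e.g.:
#
#     PROCESS_TRAIN=true python prepare_natural_questions.py   # hypothetical script name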
| 129 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
A__ = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2"
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 198 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
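# Note (added for clarity): the `datasets` FileLock shortens overly long
# lock-file names so they fit the common 255-byte filename limit, which is
# exactly what the assertions in `test_long_filenames` verify — the lock file
# still ends in ".lock" but no longer contains the full 1000-character name.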
| 198 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
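# Usage sketch (added for illustration; the column names are hypothetical):
#
#     task = AudioClassification(audio_column="speech", label_column="intent")
#     task.column_mapping   # -> {"speech": "audio", "intent": "labels"}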
| 153 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
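# Note (added for clarity): applied to the all-zeros initial state, the QFT
# produces a uniform superposition, so the measured counts should be spread
# roughly evenly over all 2**number_of_qubits bitstrings.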
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 188 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: index of the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
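# Note (added for clarity): the update above is the standard Kohonen learning
# rule for the winning unit j,
#
#     w_j <- w_j + alpha * (x - w_j)
#
# which pulls the winner's weight vector toward the presented sample x.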
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 350 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
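# Example (added for illustration): gray_code(2) -> [0, 1, 3, 2], i.e. the
# 2-bit strings "00", "01", "11", "10", where consecutive codes differ in
# exactly one bit.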
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
@require_torch
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
import torch
lowerCamelCase__ : List[Any] =pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
lowerCamelCase__ : Any =text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@require_tf
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : str =pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
lowerCamelCase__ : Tuple =text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@slow
@require_torch
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Any =pipeline('text-classification' )
lowerCamelCase__ : List[str] =text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
lowerCamelCase__ : str =text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
lowerCamelCase__ : List[str] =text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
@slow
@require_tf
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str =pipeline('text-classification' , framework='tf' )
lowerCamelCase__ : List[Any] =text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
lowerCamelCase__ : Union[str, Any] =text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
lowerCamelCase__ : int =text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =TextClassificationPipeline(model=lowercase_ , tokenizer=lowercase_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : Tuple =text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCamelCase__ : Optional[int] ='HuggingFace is in'
lowerCamelCase__ : List[str] =text_classifier(lowercase_ )
self.assertEqual(nested_simplify(lowercase_ ) , [{'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
lowerCamelCase__ : List[Any] =['HuggingFace is in ', 'Paris is in France']
lowerCamelCase__ : Any =text_classifier(lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , [{'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}, {'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCamelCase__ : Dict =text_classifier(lowercase_ , top_k=lowercase_ )
lowerCamelCase__ : Optional[Any] =len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase_ ) , [[{'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}] * N, [{'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}] * N] , )
lowerCamelCase__ : int ={'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
lowerCamelCase__ : Optional[Any] =text_classifier(lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , {'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowerCamelCase__ : str =[['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(lowercase_ ):
text_classifier(lowercase_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCamelCase__ : Dict =text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(lowercase_ ) , [{'label': ANY(lowercase_ ), 'score': ANY(lowercase_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) | 126 |
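# Editorial note (inferred from the assertions above): `top_k=None` is the
# modern way to request every label's score, while `return_all_scores=True`
# is legacy behaviour that wraps each result one list level deeper.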
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
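# Editorial worked example for the slicing above (numbers are illustrative):
# with hidden_size = 768 the fused timm qkv weight has shape (2304, 768);
# rows [0:768] become the query projection, [768:1536] the key projection,
# and [1536:2304] the value projection.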
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
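    # Editorial usage sketch (the dump path is a placeholder, not from the original):
    #   python convert_vit_hybrid_timm_to_pytorch.py \
    #       --vit_name vit_base_r50_s16_384 \
    #       --pytorch_dump_folder_path ./vit-hybrid-base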
'''simple docstring'''
def heaps(arr: list) -> list:
    '''simple docstring'''
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
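# Editorial usage sketch (not in the original): Heap's algorithm yields n!
# tuples, each permutation exactly once, e.g.
#   heaps([1, 2, 3]) ->
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]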
if __name__ == "__main__":
__UpperCAmelCase :List[str] = input("Enter numbers separated by a comma:\n").strip()
__UpperCAmelCase :int = [int(item) for item in user_input.split(",")]
print(heaps(arr))
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
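# Editorial note: the integration test above only compares a 3x3 slice of the
# hidden states; the atol=1e-4 tolerance comes from the original test and
# absorbs small numerical differences across devices.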
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
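    # Editorial example (grounded in the helper above): calling
    # self.get_scheduler_config(prediction_type="v_prediction") overrides only
    # that key while keeping the four defaults defined in the dict.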
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
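# Editorial usage sketch (the checkpoint id is an assumption, not from this file):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]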
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
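# Editorial note: with the lazy pattern above, importing this package stays
# cheap; the torch- or flax-backed module is only loaded the first time one of
# the names registered in `_import_structure` is actually accessed.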
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
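# Editorial note (the restored names above are an inference): these tables
# match the hand-tuned DeepFloyd IF sampling schedules, and each list length
# equals the step count in its name (27, 27, 50, 100, 185, 27, 40 and 100
# entries respectively).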
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[str] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__lowerCamelCase : int = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(_lowerCAmelCase )
__lowerCamelCase : Tuple = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__lowerCamelCase : Tuple = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
__lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Any = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase : Dict = tempfile.mkdtemp()
__lowerCamelCase : int = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
__lowerCamelCase : int = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
__lowerCamelCase : List[str] = tokenizer_r.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Any = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase : Tuple = tempfile.mkdtemp()
__lowerCamelCase : Dict = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase : Tuple = tokenizer_r.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
@require_torch
def lowercase_ ( self ) -> List[Any]:
if not self.test_seqaseq:
return
__lowerCamelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__lowerCamelCase : Union[str, Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__lowerCamelCase : Dict = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__lowerCamelCase : Any = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__lowerCamelCase : Optional[Any] = tokenizer.prepare_seqaseq_batch(
_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__lowerCamelCase : str = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , _lowerCAmelCase )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Tuple = [AddedToken('<special>' , lstrip=_lowerCAmelCase )]
__lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer_r.encode('Hey this is a <special> token' )
__lowerCamelCase : List[Any] = tokenizer_r.encode('<special>' , add_special_tokens=_lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
__lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer_p.encode('Hey this is a <special> token' )
__lowerCamelCase : int = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def lowercase_ ( self ) -> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_60_57 )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
def lowercase_ ( self ) -> List[str]:
self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
__lowerCamelCase : List[str] = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
__lowerCamelCase : List[Any] = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
__lowerCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : Any = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , _lowerCAmelCase )
__lowerCamelCase : Optional[int] = 10
__lowerCamelCase : Optional[int] = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
def lowercase_ ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_62_03, 3] )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Any = tempfile.mkdtemp()
__lowerCamelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCAmelCase )
__lowerCamelCase : int = NllbTokenizer.from_pretrained(_lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase )
@require_torch
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__lowerCamelCase : Union[str, Any] = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__lowerCamelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase_ ( self ) -> str:
__lowerCamelCase : List[Any] = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
__lowerCamelCase : int = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors='pt' )
__lowerCamelCase : int = targets['input_ids']
__lowerCamelCase : List[Any] = shift_tokens_right(
_lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Tuple = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
# A, test, EOS, en_XX
'input_ids': [[25_60_47, 70, 73_56, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_60_57,
} , )
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Dict = True
__lowerCamelCase : Tuple = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
__lowerCamelCase : str = False
__lowerCamelCase : str = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
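        # Editorial note (grounded in the two assertions above): with
        # legacy_behaviour=True the language code is appended after </s>
        # ([..., 2, 256047]); with legacy_behaviour=False it is prepended
        # ([256047, ..., 2]).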
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ''
else:
SCREAMING_SNAKE_CASE_ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
SCREAMING_SNAKE_CASE_ = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = val
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = ViTMSNConfig()
SCREAMING_SNAKE_CASE_ = 10_00
SCREAMING_SNAKE_CASE_ = 'datasets/huggingface/label-files'
SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 3_84
SCREAMING_SNAKE_CASE_ = 15_36
SCREAMING_SNAKE_CASE_ = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = ViTMSNModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='cpu' )['target_encoder']
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , base_model=__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(
size=config.image_size , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = image_processor(images=__UpperCAmelCase , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 225 | 0 |
"""
Project Euler Problem 14: https://projecteuler.net/problem=14
Which starting number, under one million, produces the longest Collatz chain?
"""


def solution(limit: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
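# Quick sanity check (illustrative, not part of the original solution file):
# the longest Collatz chain below one million famously starts at 837799, so
# solution(1000000) is expected to return 837799.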
| 214 | """ Testing suite for the PyTorch DPT model. """


import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 214 | 1 |
"""
Implementation of the gradient descent algorithm for minimizing the cost of a
linear hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute theta_0 + theta_1*x_1 + ... + theta_n*x_n for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 349 |
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 349 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "Speech2TextFeatureExtractor"
lowercase__ = "Speech2TextTokenizer"
def __init__( self: Optional[Any], a_: Any, a_: Optional[int] ):
'''simple docstring'''
super().__init__(a_, a_ )
_snake_case : Union[str, Any] = self.feature_extractor
_snake_case : Optional[int] = False
def __call__( self: str, *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a_, **a_ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_snake_case : Tuple = kwargs.pop("""raw_speech""" )
else:
_snake_case : Optional[int] = kwargs.pop("""audio""", a_ )
_snake_case : List[Any] = kwargs.pop("""sampling_rate""", a_ )
_snake_case : Optional[int] = kwargs.pop("""text""", a_ )
if len(a_ ) > 0:
_snake_case : str = args[0]
_snake_case : Any = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_snake_case : Union[str, Any] = self.feature_extractor(a_, *a_, sampling_rate=a_, **a_ )
if text is not None:
_snake_case : Optional[Any] = self.tokenizer(a_, **a_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_snake_case : Dict = encodings["""input_ids"""]
return inputs
def UpperCamelCase_ ( self: List[str], *a_: List[str], **a_: Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: Tuple, *a_: List[str], **a_: List[str] ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@contextmanager
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_snake_case : Dict = True
_snake_case : Optional[int] = self.tokenizer
yield
_snake_case : Optional[Any] = self.feature_extractor
_snake_case : int = False
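# Minimal usage sketch (added for illustration; the checkpoint name below is an
# example and not taken from this file):
#
#   from transformers import Speech2TextProcessor
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
#   # `inputs` now carries the feature extractor's output plus a "labels" entry
#   # holding the tokenized transcript, mirroring the __call__ logic above.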
| 132 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int = 1_00_00_00 ):
"""simple docstring"""
_snake_case : Dict = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , snake_case__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
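# Added note (illustrative, not part of the original file): for the default limit
# of one million this is Project Euler 72, and solution() is expected to return
# 303963552391.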
| 132 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 220 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCamelCase : Any = TypeVar('T')
class a ( Generic[T] ):
def __init__( self , _lowerCamelCase ):
lowercase = data
lowercase = None
def __str__( self ):
return F'{self.data}'
class a ( Generic[T] ):
def __init__( self ):
lowercase = None
def __iter__( self ):
lowercase = self.top
while node:
yield node.data
lowercase = node.next
def __str__( self ):
return "->".join([str(_lowerCamelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def UpperCamelCase_ ( self ):
return self.top is None
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = Node(_lowerCamelCase )
if not self.is_empty():
lowercase = self.top
lowercase = node
def UpperCamelCase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _lowerCamelCase )
lowercase = self.top
lowercase = self.top.next
return pop_node.data
def UpperCamelCase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase_ ( self ):
lowercase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
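# Illustrative usage (added example):
#
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   assert str(stack) == "2->1" and stack.peek() == 2
#   assert stack.pop() == 2 and len(stack) == 1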
| 220 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
from manim import *


class Stage2(Scene):
    # NOTE: the original class and base-class names were obfuscated; "Stage2" and
    # "Scene", as well as the direction constants below, are reconstructions based
    # on the narration text and the big-model-inference Manim animations this
    # scene matches.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
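# To render this scene with the standard manim CLI (assuming this file is saved
# as stage.py): manim -pql stage.py Stage2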
| 220 | 0 |
# Acceleration due to gravity on Earth (m/s^2)
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Calculate the buoyant force on an object submerged in a fluid:
    force = fluid density * gravity * displaced volume.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
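# Worked example (added for illustration): 0.5 m^3 of fresh water
# (density 997 kg/m^3) displaced at standard gravity gives
# 997 * 0.5 * 9.80665 ≈ 4888.6 N of buoyant force:
#   archimedes_principle(fluid_density=997, volume=0.5)  # -> 4888.615...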
| 2 |
"""
Generate all k-element combinations of the numbers 1..n via backtracking.
"""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
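# For n=4 and k=2 the demo above prints the C(4, 2) = 6 combinations,
# one per line: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".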
| 2 | 1 |
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
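# Illustrative usage (added example):
#   act = get_activation("gelu")    # returns an nn.GELU() module
#   y = act(torch.randn(2, 3))      # apply it like any other nn.Module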
| 351 |
"""Tokenization classes for Salesforce CTRL."""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __A ( unittest.TestCase ):
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_lowerCAmelCase : Tuple = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=a__ , cache_dir=a__ )
_lowerCAmelCase : Optional[int] = [t[-1] for t in os.walk(os.path.join(a__ , os.listdir(a__ )[0] , """snapshots""" ) )]
_lowerCAmelCase : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=a__ )
_lowerCAmelCase : str = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_lowerCAmelCase : str = jax.random.PRNGKey(0 )
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Any = jax.device_count()
_lowerCAmelCase : Any = num_samples * [prompt]
_lowerCAmelCase : List[str] = pipeline.prepare_inputs(a__ )
# shard inputs and rng
_lowerCAmelCase : Optional[Any] = replicate(a__ )
_lowerCAmelCase : Optional[Any] = jax.random.split(a__ , a__ )
_lowerCAmelCase : str = shard(a__ )
_lowerCAmelCase : Any = pipeline(a__ , a__ , a__ , a__ , jit=a__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(a__ , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
_lowerCAmelCase : Dict = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(a__ ) == num_samples
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=a__ )
_lowerCAmelCase : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
_lowerCAmelCase : Optional[int] = 50
_lowerCAmelCase : List[Any] = jax.device_count()
_lowerCAmelCase : List[Any] = num_samples * [prompt]
_lowerCAmelCase : Union[str, Any] = pipeline.prepare_inputs(a__ )
# shard inputs and rng
_lowerCAmelCase : int = replicate(a__ )
_lowerCAmelCase : Dict = jax.random.split(a__ , a__ )
_lowerCAmelCase : Optional[int] = shard(a__ )
_lowerCAmelCase : Dict = pipeline(a__ , a__ , a__ , a__ , jit=a__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(a__ , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=a__ )
_lowerCAmelCase : Optional[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_lowerCAmelCase : str = jax.random.PRNGKey(0 )
_lowerCAmelCase : List[Any] = 50
_lowerCAmelCase : int = jax.device_count()
_lowerCAmelCase : Union[str, Any] = num_samples * [prompt]
_lowerCAmelCase : Optional[Any] = pipeline.prepare_inputs(a__ )
# shard inputs and rng
_lowerCAmelCase : Tuple = replicate(a__ )
_lowerCAmelCase : Any = jax.random.split(a__ , a__ )
_lowerCAmelCase : str = shard(a__ )
_lowerCAmelCase : str = pipeline(a__ , a__ , a__ , a__ , jit=a__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(a__ , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat16_with_safety_checker(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # The results look visually very similar; however, the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
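
# A minimal sketch of the data-parallel sampling pattern the tests above repeat
# (`FlaxStableDiffusionPipeline`, `replicate` and `shard` are the real APIs; the
# prompt and step count are illustrative): weights are replicated to every
# device, the RNG key is split per device, and inputs are sharded before the
# jitted, pmapped call.
#
#     pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None)
#     num_devices = jax.device_count()
#     prompt_ids = pipeline.prepare_inputs(["a watercolor lighthouse"] * num_devices)
#     params = replicate(params)                                  # one weight copy per device
#     rng = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one RNG key per device
#     prompt_ids = shard(prompt_ids)                              # leading axis = devices
#     images = pipeline(prompt_ids, params, rng, 50, jit=True).images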
| 44 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
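
# A short usage sketch (the checkpoint name is taken from the maps above; the
# input sentence is illustrative):
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     encoded = tokenizer("Hello world")
#     # input_ids are framed as [CLS] ... [SEP] by build_inputs_with_special_tokens,
#     # and create_token_type_ids_from_sequences yields the matching 0/1 segment ids.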
| 44 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
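        # Unlike BERT, XLNet appends its special tokens at the *end* of the sequence:
        # in this vocabulary <sep> has id 4 and <cls> has id 3, so a single sequence
        # becomes `text + [4, 3]` and a pair becomes `text + [4] + text_2 + [4, 3]`.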
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_A = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # `_A` is the expected encoding dict built above
        self.tokenizer_integration_test_util(
            expected_encoding=_A, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511")
| 357 | import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def snake_case ( snake_case__ :str = "dhaka" , snake_case__ :int = 5) -> int:
_A = min(snake_case__ , 50) # Prevent abuse!
_A = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
_A = requests.get("""https://www.google.com/search""" , params=snake_case__ , headers=snake_case__)
_A = BeautifulSoup(html.text , """html.parser""")
_A = """""".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script"""))))
_A = json.dumps(snake_case__)
_A = json.loads(snake_case__)
_A = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , snake_case__ , )
if not matched_google_image_data:
return 0
_A = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(snake_case__) , )
_A = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , snake_case__ , )
for index, fixed_full_res_image in enumerate(snake_case__):
if index >= max_images:
return index
_A = bytes(snake_case__ , """ascii""").decode(
"""unicode-escape""")
_A = bytes(snake_case__ , """ascii""").decode(
"""unicode-escape""")
_A = urllib.request.build_opener()
_A = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(snake_case__)
_A = F'''query_{query.replace(' ' , '_')}'''
if not os.path.exists(snake_case__):
os.makedirs(snake_case__)
urllib.request.urlretrieve( # noqa: S310
snake_case__ , F'''{path_name}/original_size_img_{index}.jpg''')
return index
if __name__ == "__main__":
try:
_SCREAMING_SNAKE_CASE = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
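
# Example invocation (illustrative search term): running
#
#     python download_images_from_google_query.py "blue sky"
#
# saves up to five full-resolution results into ./query_blue_sky/.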
| 81 | 0 |
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: return the longest palindromic substring of ``input_string``."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
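
# Example: the longest palindromic substring of "abbbaba" is "abbba"; Manacher's
# algorithm finds it in O(n) by reusing palindrome lengths already computed
# inside the current rightmost palindromic window:
#
#     print(palindromic_string("abbbaba"))  # -> abbba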
| 284 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"Splitting {sum(idx)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
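
# A minimal sketch of how this dataset is typically consumed (the DataLoader
# wiring is illustrative; `params` must expose the attributes used above, e.g.
# max_model_input_size, mlm, special_tok_ids and is_master):
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:
#         ...  # token_ids: padded (bs, max_seq_len) tensor, lengths: (bs,) tensor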
| 196 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
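
# Note on the pattern above: `import module` stays cheap because `_LazyModule`
# only resolves the names listed in `_import_structure` on first attribute
# access; e.g. touching `TrajectoryTransformerModel` is what actually triggers
# the torch-dependent import.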
| 360 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
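        # Worked example with the defaults above: image_size=64 and an output
        # stride of 32 give a 64/32 x 64/32 = 2x2 feature map, i.e. 4 patches,
        # so seq_length = 4 + 1 = 5 once the [CLS] token is added.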
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase , )
def A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = ViTHybridModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Tuple = self.type_sequence_label_size
lowerCAmelCase_ : Tuple = ViTHybridForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : int = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
@cached_property
def A ( self : int ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
lowerCAmelCase_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 28 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a check letter given by number % 23."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
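
# Example: "12345678Z" is a valid id, because 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z":
#
#     is_spain_national_id("12345678Z")  # -> True
#     is_spain_national_id("12345678T")  # -> False (wrong check letter)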
| 329 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # `preprocess` may set `model_inputs["input_ids"] = None`; in batch mode the pipeline
        # collates these into a list of `None`, which `generate` cannot handle, so reset it here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(output_ids, skip_special_tokens=True)
            }
            records.append(record)
        return records
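
# A short usage sketch (the checkpoint id is illustrative; any image-to-text
# model on the Hub works):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("photo_of_a_cat.png")  # -> [{"generated_text": "a cat sitting ..."}]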
| 329 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
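    # For example, ("input_ids", {0: "batch", 1: "encoder_sequence"}) marks both
    # dimensions of `input_ids` as dynamic axes in the exported ONNX graph, so one
    # export serves any batch size and sequence length at inference time.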
    @property
    def outputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase : int = common_inputs['''input_ids'''].shape
lowercase : str = common_inputs['''decoder_input_ids'''].shape[1]
lowercase : Union[str, Any] = self.num_attention_heads
lowercase : Tuple = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : List[str] = decoder_seq_length + 3
lowercase : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , dim=1 )
lowercase : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase : Union[str, Any] = self.num_layers
lowercase : Tuple = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) - min_num_layers
lowercase : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
lowercase : List[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase : List[str] = seqlen + 2
lowercase : str = self.num_layers
lowercase : List[str] = self.num_attention_heads
lowercase : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : Optional[Any] = common_inputs['''attention_mask'''].dtype
lowercase : Union[str, Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
lowercase : Optional[Any] = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 356 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def __lowerCamelCase ( self ):
return len(self.sp_model )
    def __lowerCamelCase ( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
        self.__dict__ = SCREAMING_SNAKE_CASE__
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        if self.remove_space:
            outputs = ''' '''.join(SCREAMING_SNAKE_CASE__.strip().split() )
        else:
            outputs = SCREAMING_SNAKE_CASE__
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        text = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in SCREAMING_SNAKE_CASE__:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ''' '''
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __lowerCamelCase ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def __lowerCamelCase ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def __lowerCamelCase ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def __lowerCamelCase ( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 173 | 0 |
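# A standalone restatement of the SentencePiece preprocessing above (whitespace
# collapsing, LaTeX-style quote normalization, optional NFKD accent stripping,
# optional lowercasing), extracted so it can be run without the tokenizer class.
import unicodedata as _unicodedata

def _preprocess_text_sketch(inputs, remove_space=True, keep_accents=False, do_lower_case=True):
    outputs = " ".join(inputs.strip().split()) if remove_space else inputs
    outputs = outputs.replace("``", '"').replace("''", '"')
    if not keep_accents:
        outputs = _unicodedata.normalize("NFKD", outputs)
        outputs = "".join(c for c in outputs if not _unicodedata.combining(c))
    return outputs.lower() if do_lower_case else outputs

assert _preprocess_text_sketch("  Héllo ``world''  ") == 'hello "world"'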
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Optional[int] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] = BartphoTokenizer
snake_case__ : Union[str, Any] = False
snake_case__ : Optional[int] = True
    def UpperCAmelCase_ ( self : List[str] ) -> int:
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""" )
        tokenizer = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCAmelCase_ ( self : str , **UpperCAmelCase__ : int ) -> int:
        UpperCAmelCase__.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
    def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def UpperCAmelCase_ ( self : List[str] ) -> str:
        tokenizer = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
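# A tiny sketch of the id mapping the assertions above rely on: BartPho keeps
# fairseq-style special tokens first and appends the monolingual vocab after
# them, so unknown pieces ("▁l", "à") fall back to <unk>. The exact offset
# (specials at ids 0-3) is an assumption for illustration.
_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3,
          "▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8}
_tokens = "▁This ▁is ▁a ▁l à ▁t est".split() + ["<unk>"]
assert [_vocab.get(t, _vocab["<unk>"]) for t in _tokens] == [4, 5, 6, 3, 3, 7, 8, 3]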
| 54 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase_ : str = logging.get_logger(__name__)
class __A ( CLIPImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 81 | 0 |
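# The class above is the standard deprecation-alias pattern: keep the old name
# importable, warn on construction, and delegate everything to the replacement
# class. A generic sketch with made-up names:
import warnings as _warnings

class _NewImageProcessor:  # hypothetical replacement class
    def __init__(self, size=224):
        self.size = size

class _OldFeatureExtractor(_NewImageProcessor):  # hypothetical deprecated alias
    def __init__(self, *args, **kwargs):
        _warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewImageProcessor instead." ,
            FutureWarning , )
        super().__init__(*args, **kwargs)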
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ) ->Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ) ->Dict:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ) ->Optional[int]:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main( ) ->List[Any]:
    parser = argparse.ArgumentParser(description='Training' )
    parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
    parser.add_argument(
        '--dump_path' , type=str , required=True , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=str , required=True , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=str , choices=['distilbert', 'roberta', 'gpt2'] , required=True , help='The student type (DistilBERT, RoBERTa).' , )
    parser.add_argument('--student_config' , type=str , required=True , help='Path to the student configuration.' )
    parser.add_argument(
        '--student_pretrained_weights' , default=None , type=str , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=True , help='Teacher type (BERT, RoBERTa).' )
    parser.add_argument('--teacher_name' , type=str , required=True , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=float , help='Temperature for the softmax temperature.' )
    parser.add_argument(
        '--alpha_ce' , default=0.5 , type=float , help='Linear weight for the distillation loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_mlm' , default=0.0 , type=float , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
    parser.add_argument('--alpha_clm' , default=0.5 , type=float , help='Linear weight for the CLM loss. Must be >=0.' )
    parser.add_argument('--alpha_mse' , default=0.0 , type=float , help='Linear weight of the MSE loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_cos' , default=0.0 , type=float , help='Linear weight of the cosine embedding loss. Must be >=0.' )
    parser.add_argument(
        '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
    parser.add_argument(
        '--mlm_mask_prop' , default=0.15 , type=float , help='Proportion of tokens for which we need to make a prediction.' , )
    parser.add_argument('--word_mask' , default=0.8 , type=float , help='Proportion of tokens to mask out.' )
    parser.add_argument('--word_keep' , default=0.1 , type=float , help='Proportion of tokens to keep.' )
    parser.add_argument('--word_rand' , default=0.1 , type=float , help='Proportion of tokens to randomly replace.' )
    parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=float , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
    parser.add_argument('--token_counts' , type=str , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only on the [MLM] prediction distribution.' , )
    parser.add_argument(
        '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
    parser.add_argument(
        '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=int , default=3 , help='Number of passes on the whole dataset.' )
    parser.add_argument('--batch_size' , type=int , default=5 , help='Batch size (for each process).' )
    parser.add_argument(
        '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=50 , help='Gradient accumulation for larger training batches.' , )
    parser.add_argument('--warmup_prop' , default=0.05 , type=float , help='Linear warmup proportion.' )
    parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
    parser.add_argument('--learning_rate' , default=5e-4 , type=float , help='The initial learning rate for Adam.' )
    parser.add_argument('--adam_epsilon' , default=1e-6 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , default=5.0 , type=float , help='Max gradient norm.' )
    parser.add_argument('--initializer_range' , default=0.02 , type=float , help='Random initialization range.' )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O1' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            ' See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=int , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=int , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=int , default=500 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=int , default=4000 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    ' it. Use `--force` if you want to overwrite it.' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
        # SAVE PARAMS #
        logger.info(F'Param: {args}' )
        with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(args ) , f , indent=4 )
        git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'Special tokens {special_tok_ids}' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F'Loading data from {args.data_file}' )
    with open(args.data_file , 'rb' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
        with open(args.token_counts , 'rb' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('Data loader created.' )
    # STUDENT #
    logger.info(F'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F'cuda:{args.local_rank}' )
    logger.info('Student loaded.' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F'cuda:{args.local_rank}' )
    logger.info(F'Teacher loaded from {args.teacher_name}.' )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
    logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
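# Standalone sketch of the MLM masking weights computed in main() above: token
# counts are smoothed with count ** (-mlm_smoothing) (0.7 by default), special
# tokens are zeroed, and the Distiller later samples positions to mask from
# these weights (torch.multinomial normalizes them), so rarer tokens are
# masked more often. The counts and special-token id below are made up.
def _token_probs_sketch():
    counts = np.array([100000, 2500, 40, 7], dtype=np.float64)  # hypothetical counts
    token_probs = np.maximum(counts, 1) ** -0.7
    token_probs[0] = 0.0  # hypothetical special-token id: never masked/predicted
    return torch.multinomial(torch.from_numpy(token_probs), num_samples=2)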
| 203 |
"""simple docstring"""
def solution( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
    limit = _SCREAMING_SNAKE_CASE + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f"{solution() = }")
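# Cross-check of the identity behind the sieve in solution(): with middle term
# y and common difference d, x = y + d and z = y - d, so
#     x**2 - y**2 - z**2 == y * (4 * d - y)
# which is why n / y + y must be divisible by 4, and 0 < d < y < 4 * d keeps
# x, y, z positive.
def _identity_check():
    for y in range(1, 20):
        for d in range(1, y):
            x, z = y + d, y - d
            assert x * x - y * y - z * z == y * (4 * d - y)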
| 203 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
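# _LazyModule above defers the submodule import until ByT5Tokenizer is first
# accessed. A dependency-free sketch of the same idea using module-level
# __getattr__ (PEP 562); bind _lazy_getattr as the module's __getattr__ to use it.
import importlib

def _lazy_getattr(name, _structure={"tokenization_byta": ["ByTaTokenizer"]}):
    for module_name, symbols in _structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")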
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : Optional[Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = '''mgp-str'''
def __init__( self , A__=[32, 128] , A__=4 , A__=3 , A__=27 , A__=38 , A__=5_0257 , A__=3_0522 , A__=768 , A__=12 , A__=12 , A__=4.0 , A__=True , A__=False , A__=1e-5 , A__=0.0 , A__=0.0 , A__=0.0 , A__=False , A__=0.0_2 , **A__ , ):
super().__init__(**A__ )
A__ : Dict = image_size
A__ : int = patch_size
A__ : Dict = num_channels
A__ : List[Any] = max_token_length
A__ : str = num_character_labels
A__ : Tuple = num_bpe_labels
A__ : Optional[Any] = num_wordpiece_labels
A__ : Optional[int] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Any = num_attention_heads
A__ : List[Any] = mlp_ratio
A__ : Tuple = distilled
A__ : Union[str, Any] = layer_norm_eps
A__ : Tuple = drop_rate
A__ : List[str] = qkv_bias
A__ : Optional[Any] = attn_drop_rate
A__ : Union[str, Any] = drop_path_rate
A__ : Optional[Any] = output_aa_attentions
A__ : Optional[int] = initializer_range
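# Quick check of the token-grid geometry implied by the defaults above: with
# image_size=[32, 128] and patch_size=4, the ViT-style backbone sees
# (32 // 4) * (128 // 4) = 8 * 32 = 256 patch tokens, which the character,
# BPE and wordpiece heads then decode (max_token_length caps the output).
_image_size, _patch_size = (32, 128), 4
_num_patches = (_image_size[0] // _patch_size) * (_image_size[1] // _patch_size)
assert _num_patches == 256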
| 192 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
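# The exported NllbMoeTop2Router implements top-2 expert routing. A minimal,
# generic sketch of top-2 gating (not the NLLB-MoE implementation: it omits
# expert capacity, jitter noise and the load-balancing loss):
def _top2_route_sketch():
    import torch
    import torch.nn.functional as F

    hidden = torch.randn(4, 16)  # 4 tokens, hidden size 16
    router = torch.randn(16, 8)  # routing matrix for 8 hypothetical experts
    probs = F.softmax(hidden @ router, dim=-1)  # (4, 8) routing probabilities
    top2_probs, top2_experts = probs.topk(2, dim=-1)  # best two experts per token
    top2_probs = top2_probs / top2_probs.sum(-1, keepdim=True)  # renormalize
    return top2_experts, top2_probs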
| 160 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( __A ) -> Union[str, Any]:
    if isinstance(__A , collections.abc.Iterable ):
        return __A
    return (__A, __A)
@require_flax
class __UpperCAmelCase :
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , F'Difference between torch and flax is {diff} (>= {tol}).' )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
_snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {'vision_model': vision_model, 'text_model': text_model}
_snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
_snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {'vision_model': vision_model, 'text_model': text_model}
_snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
_snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_snake_case = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase_ )
_snake_case = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ )
_snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_snake_case = after_output[0]
_snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {'vision_model': vision_model, 'text_model': text_model}
_snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
_snake_case = model(
input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
_snake_case = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case = to_atuple(vision_model.config.image_size )
_snake_case = to_atuple(vision_model.config.patch_size )
_snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_snake_case = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_snake_case = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
pt_model.to(lowerCAmelCase_ )
pt_model.eval()
# prepare inputs
_snake_case = inputs_dict
_snake_case = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_snake_case = pt_model(**lowerCAmelCase_ ).to_tuple()
_snake_case = fx_model(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase_ )
_snake_case = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
_snake_case = fx_model_loaded(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase_ )
_snake_case = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_flax=lowerCAmelCase_ )
pt_model_loaded.to(lowerCAmelCase_ )
pt_model_loaded.eval()
with torch.no_grad():
_snake_case = pt_model_loaded(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase_ , pt_output_loaded.numpy() , 4E-2 )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = VisionTextDualEncoderModel(lowerCAmelCase_ )
_snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
_snake_case = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase_ )
_snake_case = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = VisionTextDualEncoderModel(lowerCAmelCase_ )
_snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
_snake_case = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
  def lowerCamelCase ( self ):
    """simple docstring"""
    inputs_dict = self.prepare_config_and_inputs()
    self.check_model_from_pretrained_configs(**inputs_dict )
  def lowerCamelCase ( self ):
    """simple docstring"""
    inputs_dict = self.prepare_config_and_inputs()
    self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
  def lowerCamelCase ( self ):
    """simple docstring"""
    inputs_dict = self.prepare_config_and_inputs()
    self.check_save_load(**inputs_dict )
  def lowerCamelCase ( self ):
    """simple docstring"""
    inputs_dict = self.prepare_config_and_inputs()
    self.check_vision_text_output_attention(**inputs_dict )
  @is_pt_flax_cross_test
  def lowerCamelCase ( self ):
    """simple docstring"""
    config_inputs_dict = self.prepare_config_and_inputs()
    vision_config = config_inputs_dict.pop('vision_config' )
    text_config = config_inputs_dict.pop('text_config' )
    inputs_dict = config_inputs_dict
    self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
    self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
  @slow
  def lowerCamelCase ( self ):
    """simple docstring"""
    model_a , inputs = self.get_pretrained_model_and_inputs()
    outputs = model_a(**inputs )
    out_a = outputs[0]
    with tempfile.TemporaryDirectory() as tmp_dirname:
      model_a.save_pretrained(tmp_dirname )
      model_a = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
      after_outputs = model_a(**inputs )
      out_after = after_outputs[0]
      max_diff = np.amax(np.abs(out_after - out_a ) )
      self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , )
_snake_case = 13
_snake_case = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_snake_case = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_snake_case = random_attention_mask([batch_size, 4] )
_snake_case = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = FlaxViTModel(lowerCAmelCase_ )
_snake_case = FlaxBertModel(lowerCAmelCase_ )
return vision_model, text_model
  def lowerCamelCase ( self ):
    """simple docstring"""
    vit_model_tester = FlaxViTModelTester(self )
    bert_model_tester = FlaxBertModelTester(self )
    vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
    text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
    vision_config , pixel_values = vision_config_and_inputs
    text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
    # make sure that cross attention layers are added
    return {
        "text_config": text_config,
        "vision_config": vision_config,
        "pixel_values": pixel_values,
        "attention_mask": attention_mask,
        "input_ids": input_ids,
        "token_type_ids": token_type_ids,
    }
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , )
_snake_case = 13
_snake_case = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_snake_case = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_snake_case = random_attention_mask([batch_size, 4] )
_snake_case = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = FlaxCLIPVisionModel(lowerCAmelCase_ )
_snake_case = FlaxBertModel(lowerCAmelCase_ )
return vision_model, text_model
  def lowerCamelCase ( self ):
    """simple docstring"""
    clip_model_tester = FlaxCLIPVisionModelTester(self )
    bert_model_tester = FlaxBertModelTester(self )
    vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
    text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
    vision_config , pixel_values = vision_config_and_inputs
    text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
    # make sure that cross attention layers are added
    return {
        "text_config": text_config,
        "vision_config": vision_config,
        "pixel_values": pixel_values,
        "attention_mask": attention_mask,
        "input_ids": input_ids,
        "token_type_ids": token_type_ids,
    }
@require_flax
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
_snake_case = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' )
_snake_case = model(**lowerCAmelCase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_snake_case = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase_ , atol=1E-3 ) )
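# The integration test above checks CLIP-style similarity logits; they come
# from L2-normalized image/text embeddings and a learned scale. A numpy sketch:
def _clip_logits_sketch():
    image_embeds = np.random.randn(1, 512)  # hypothetical projected image features
    text_embeds = np.random.randn(2, 512)   # two candidate captions
    image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logit_scale = 1.0  # matches logit_scale_init_value in the test above
    logits_per_image = logit_scale * image_embeds @ text_embeds.T  # (1, 2)
    logits_per_text = logits_per_image.T                           # (2, 1)
    return logits_per_image, logits_per_text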
| 160 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : int="pt" ):
'''simple docstring'''
lowercase__ : str = {'add_prefix_space': True} if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not line.startswith(' ' ) else {}
lowercase__ : Optional[Any] = padding_side
return tokenizer(
[line] , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' if pad_to_max_length else None , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ):
'''simple docstring'''
lowercase__ : Any = input_ids.ne(SCREAMING_SNAKE_CASE_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def __init__( self , a , a , a , a , a="train" , a=None , a=None , a=None , a="" , ):
super().__init__()
lowercase__ : Tuple = Path(a).joinpath(type_path + '.source')
lowercase__ : List[Any] = Path(a).joinpath(type_path + '.target')
lowercase__ : Optional[int] = self.get_char_lens(self.src_file)
lowercase__ : Dict = max_source_length
lowercase__ : int = max_target_length
assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
lowercase__ : List[Any] = tokenizer
lowercase__ : Any = prefix
if n_obs is not None:
lowercase__ : List[Any] = self.src_lens[:n_obs]
lowercase__ : Union[str, Any] = src_lang
lowercase__ : Dict = tgt_lang
def __len__( self):
return len(self.src_lens)
def __getitem__( self , a):
lowercase__ : Union[str, Any] = index + 1 # linecache starts at 1
lowercase__ : int = self.prefix + linecache.getline(str(self.src_file) , a).rstrip('\n')
lowercase__ : Tuple = linecache.getline(str(self.tgt_file) , a).rstrip('\n')
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase__ : List[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a) else self.tokenizer
)
lowercase__ : Any = self.tokenizer.generator if isinstance(self.tokenizer , a) else self.tokenizer
lowercase__ : int = encode_line(a , a , self.max_source_length , 'right')
lowercase__ : Optional[int] = encode_line(a , a , self.max_target_length , 'right')
lowercase__ : str = source_inputs['input_ids'].squeeze()
lowercase__ : Optional[Any] = target_inputs['input_ids'].squeeze()
lowercase__ : Optional[int] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( a):
return [len(a) for x in Path(a).open().readlines()]
def snake_case_ ( self , a):
lowercase__ : int = torch.stack([x['input_ids'] for x in batch])
lowercase__ : Union[str, Any] = torch.stack([x['attention_mask'] for x in batch])
lowercase__ : Dict = torch.stack([x['decoder_input_ids'] for x in batch])
lowercase__ : List[str] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a)
else self.tokenizer.pad_token_id
)
lowercase__ : List[str] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a)
else self.tokenizer.pad_token_id
)
lowercase__ : List[Any] = trim_batch(a , a)
lowercase__ , lowercase__ : List[str] = trim_batch(a , a , attention_mask=a)
lowercase__ : Optional[Any] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
lowercase__ : Dict = get_git_info()
save_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , 'git_log.json' ) )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=4 , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( ):
'''simple docstring'''
lowercase__ : Union[str, Any] = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE_ )
lowercase__ : Union[str, Any] = {
'repo_id': str(SCREAMING_SNAKE_CASE_ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : Iterable ):
'''simple docstring'''
return list(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
def remove_articles(SCREAMING_SNAKE_CASE_ : int ):
return re.sub(R'\b(a|an|the)\b' , ' ' , SCREAMING_SNAKE_CASE_ )
def white_space_fix(SCREAMING_SNAKE_CASE_ : List[str] ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE_ : int ):
lowercase__ : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE_ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
lowercase__ : int = normalize_answer(SCREAMING_SNAKE_CASE_ ).split()
lowercase__ : Any = normalize_answer(SCREAMING_SNAKE_CASE_ ).split()
lowercase__ : List[Any] = Counter(SCREAMING_SNAKE_CASE_ ) & Counter(SCREAMING_SNAKE_CASE_ )
lowercase__ : int = sum(common.values() )
if num_same == 0:
return 0
lowercase__ : Optional[Any] = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = (2 * precision * recall) / (precision + recall)
return fa
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
return normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Tuple = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
em += exact_match_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
em /= len(SCREAMING_SNAKE_CASE_ )
return {"em": em}
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
lowercase__ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase__ : Any = 'dropout_rate'
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not hasattr(SCREAMING_SNAKE_CASE_ , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(SCREAMING_SNAKE_CASE_ ) )
delattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
lowercase__ : Any = p if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
delattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return hparams, config
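# Quick self-contained check of the SQuAD-style normalization and F1 defined
# above (names follow the un-obfuscated originals); re-implemented locally
# because the module-level function names in this dump collide.
def _metrics_sketch():
    def normalize(s):
        s = re.sub(r"\b(a|an|the)\b", " ", s.lower())
        s = "".join(ch for ch in s if ch not in set(string.punctuation))
        return " ".join(s.split())

    def fa(pred, gold):
        p, g = normalize(pred).split(), normalize(gold).split()
        num_same = sum((Counter(p) & Counter(g)).values())
        if num_same == 0:
            return 0.0
        precision, recall = num_same / len(p), num_same / len(g)
        return 2 * precision * recall / (precision + recall)

    assert normalize("The Eiffel Tower!") == "eiffel tower"
    assert fa("The Eiffel Tower!", "eiffel tower") == 1.0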
| 214 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : int = """switch_transformers"""
__lowerCamelCase : Optional[Any] = ["""past_key_values"""]
__lowerCamelCase : Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , a=3_2128 , a=768 , a=64 , a=2048 , a=64 , a=12 , a=3 , a=12 , a=3 , a=12 , a=8 , a=False , a=0.01 , a="float32" , a=False , a=32 , a=128 , a=0.1 , a=1e-6 , a=0.001 , a=0.001 , a=1.0 , a="relu" , a=True , a=False , a=True , a=0 , a=1 , **a , ):
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[Any] = d_model
lowercase__ : List[Any] = d_kv
lowercase__ : Any = d_ff
lowercase__ : Optional[int] = num_sparse_encoder_layers
lowercase__ : Tuple = num_layers
lowercase__ : Optional[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ : str = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowercase__ : Optional[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
lowercase__ : Optional[int] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowercase__ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowercase__ : int = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowercase__ : List[Any] = num_heads
lowercase__ : Union[str, Any] = num_experts
lowercase__ : str = expert_capacity
lowercase__ : List[Any] = router_bias
lowercase__ : Optional[int] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
lowercase__ : str = router_dtype
lowercase__ : Optional[int] = router_ignore_padding_tokens
lowercase__ : int = relative_attention_num_buckets
lowercase__ : Optional[Any] = relative_attention_max_distance
lowercase__ : List[str] = dropout_rate
lowercase__ : str = layer_norm_epsilon
lowercase__ : int = initializer_factor
lowercase__ : int = feed_forward_proj
lowercase__ : Dict = use_cache
lowercase__ : int = add_router_probs
lowercase__ : int = router_z_loss_coef
lowercase__ : List[Any] = router_aux_loss_coef
lowercase__ : int = self.feed_forward_proj.split('-')
lowercase__ : Optional[int] = act_info[-1]
lowercase__ : Dict = act_info[0] == 'gated'
if len(a) > 1 and act_info[0] != "gated" or len(a) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase__ : Optional[int] = 'gelu_new'
super().__init__(
pad_token_id=a , eos_token_id=a , is_encoder_decoder=a , **a , )
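# The interleaving logic above reduces to a stride: every
# (num_layers // num_sparse_layers)-th block is a sparse (MoE) layer. With the
# defaults (12 layers, 3 sparse) the stride is 4; the exact offset of the
# sparse blocks within the stack is decided by the modeling code, so the
# placement below is illustrative only.
_num_layers, _num_sparse = 12, 3
_sparse_step = _num_layers // _num_sparse if _num_sparse > 0 else _num_layers
_is_sparse = [(i + 1) % _sparse_step == 0 for i in range(_num_layers)]
assert _sparse_step == 4 and sum(_is_sparse) == 3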
| 214 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]=8 ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=5_1_2 , _UpperCamelCase : List[str]=5_1_2 ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__UpperCAmelCase : str = np.array(pil_image.convert("""RGB""" ) )
__UpperCAmelCase : Optional[Any] = arr.astype(np.floataa ) / 127.5 - 1
__UpperCAmelCase : List[Any] = np.transpose(_UpperCamelCase , [2, 0, 1] )
__UpperCAmelCase : Union[str, Any] = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
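# Standalone check of the resizing helper above (re-implemented here because
# the surrounding names are mangled): it returns the latent grid mapped back
# to multiples of scale_factor, i.e. ceil(side / scale_factor**2) * scale_factor.
def _downscale(height, width, scale_factor=8):
    new_h = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_w = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

assert _downscale(768, 768) == (96, 96)  # multiples of 64 map to side / 8
assert _downscale(500, 512) == (64, 64)  # non-multiples round up first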
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : DDPMScheduler , UpperCamelCase : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = min(int(num_inference_steps * strength ) , UpperCamelCase )
__UpperCAmelCase : int = max(num_inference_steps - init_timestep , 0 )
__UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Tuple=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase )}''' )
__UpperCAmelCase : Optional[Any] = image.to(device=UpperCamelCase , dtype=UpperCamelCase )
__UpperCAmelCase : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__UpperCAmelCase : Tuple = image
else:
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase )
]
__UpperCAmelCase : Any = torch.cat(UpperCamelCase , dim=0 )
else:
__UpperCAmelCase : List[str] = self.movq.encode(UpperCamelCase ).latent_dist.sample(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = self.movq.config.scaling_factor * init_latents
__UpperCAmelCase : Any = torch.cat([init_latents] , dim=0 )
__UpperCAmelCase : Any = init_latents.shape
__UpperCAmelCase : str = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
# get latents
__UpperCAmelCase : Union[str, Any] = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = init_latents
return latents
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__UpperCAmelCase : List[str] = torch.device(f'''cuda:{gpu_id}''' )
__UpperCAmelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__UpperCAmelCase : List[Any] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase : Tuple = cpu_offload_with_hook(UpperCamelCase , UpperCamelCase , prev_module_hook=UpperCamelCase )
# We'll offload the last model manually.
__UpperCAmelCase : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)  # EXAMPLE_DOC_STRING is assumed to be defined at module level
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
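For orientation, a hedged usage sketch: the method above matches the shape of diffusers' KandinskyV22Img2ImgPipeline.__call__, which consumes image embeddings produced by a separate prior pipeline. The checkpoint names and image URL below are illustrative:

import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
image_embeds, negative_image_embeds = prior("a fantasy landscape", return_dict=False)

pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("https://example.com/landscape.png")  # placeholder URL
out = pipe(
    image_embeds=image_embeds,
    image=init_image,
    negative_image_embeds=negative_image_embeds,
    strength=0.3,  # how strongly to depart from init_image, as in the signature above
).images[0]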
| 350 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( ProcessorMixin ):
    """Processor wrapping an auto image processor and an auto tokenizer."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # assumption: mirrors the usual ProcessorMixin pattern of exposing a current processor
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
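A short usage sketch for a processor composed this way (the checkpoint names are illustrative; the class name is the one defined above):

import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = lowerCamelCase__(image_processor, tokenizer)

dummy_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
batch = processor(text=["a photo of a cat"], images=dummy_image, return_tensors="pt")
# batch carries input_ids, attention_mask and pixel_values (see model_input_names)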
| 320 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    model: str
    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
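End to end, the command registered above can be exercised like this (a sketch; the model falls back to the chosen task's default, and `requests` is assumed to be installed):

# Shell: transformers-cli serve --task sentiment-analysis --port 8888
import requests

resp = requests.post(
    "http://localhost:8888/forward",
    # Body(..., embed=True) means each field is wrapped inside the JSON object
    json={"inputs": "transformers serving is neat"},
)
print(resp.json())  # {"output": [...]}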
| 46 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
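Among the exports above, `set_seed` and `find_executable_batch_size` are self-contained enough to demo; a minimal sketch (the training body is illustrative):

import torch
from accelerate.utils import find_executable_batch_size, set_seed

set_seed(42)  # seeds Python, NumPy and torch RNGs in one call

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # The decorator retries with a halved batch_size whenever a CUDA
    # out-of-memory RuntimeError escapes this function.
    data = torch.randn(batch_size, 512)
    return data.mean()

train()  # called with no args; the decorator injects batch_size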
| 184 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
| 371 |
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort: recursively split, then merge sorted halves."""

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=',')
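For reference, the expected behavior as doctest-style examples one could drop into the docstring (the function runs in O(n log n) time with O(n) auxiliary space):

# >>> merge_sort([0, 5, 3, 2, 2])
# [0, 2, 2, 3, 5]
# >>> merge_sort([])
# []
# >>> merge_sort([-2, -5, -45])
# [-45, -5, -2]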
| 90 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
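A small composition sketch grounded in the classes above (the text sub-config defaults to OPT via CONFIG_MAPPING):

vision_config = InstructBlipVisionConfig()
qformer_config = InstructBlipQFormerConfig()
text_config = CONFIG_MAPPING["opt"]()

config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
# __init__ wires the Q-Former's cross-attention width to the vision tower:
assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size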
| 20 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    """Yields successive Fibonacci numbers, starting from F(2) = 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0) -> int:
    """Returns the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
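The generator-based search can be cross-checked with an O(1) estimate derived from Binet's formula, F(k) ≈ φ^k / √5 (a sketch; it agrees with the generator for n = 1000):

import math

def solution_closed_form(n: int = 1000) -> int:
    # F(k) first reaches n digits when k * log10(phi) - log10(5) / 2 >= n - 1
    phi = (1 + 5**0.5) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))

assert solution_closed_form(1000) == 4782  # matches solution(1000)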
| 174 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
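Minimal usage sketch (`google/ddpm-cifar10-32` is the checkpoint used in the diffusers docs for this pipeline; any DDPM-style UNet checkpoint with a compatible scheduler config works):

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")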
| 314 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
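The `_LazyModule` indirection above defers the heavy torch/vision imports until an attribute is first touched. A minimal illustration of the idea (an assumption-laden sketch, not transformers' actual implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._class_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name: str):
        # import the owning submodule only now, on first access
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)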
| 314 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # fit_generator was removed in modern TF; fit accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # the sigmoid output is a probability in (0, 1); compare against a 0.5
    # threshold rather than testing for exact equality with 0 or 1
    if result[0][0] < 0.5:
        prediction = "Normal"
    else:
        prediction = "Abnormality detected"
| 123 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collects the shapes of every tensor in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Converts a flattened index into its multi-dimensional equivalent."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Runs layer over inputs in chunks along the flattened batch dims to bound peak memory."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class snake_case__ :
"""simple docstring"""
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
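A small usage sketch for `chunk_layer` (the toy layer and shapes are illustrative):

import torch

def toy_layer(x=None, y=None):
    return {"out": x + y}

x = torch.randn(16, 64, 32)
y = torch.randn(16, 64, 32)
result = chunk_layer(toy_layer, {"x": x, "y": y}, chunk_size=4, no_batch_dims=2)
# The leading (16, 64) batch dims are flattened to 1024 rows, the layer runs on
# 4-row chunks to bound peak memory, and the output is reshaped back.
assert result["out"].shape == (16, 64, 32)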
| 266 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_lowercase : Any =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 266 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self) -> int:
return len(self.sp_model )
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
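Quick usage sketch (the "xlnet-base-cased" checkpoint appears in the pretrained maps above):

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
enc = tokenizer("Hello world")
# Unlike BERT-style tokenizers, XLNet appends <sep> and <cls> at the END of the
# sequence (see build_inputs_with_special_tokens) and pads on the left.
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))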
| 270 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    model_type = """mask2former"""
    backbones_supported = ["""swin"""]
    attribute_map = {"""hidden_size""": """hidden_dim"""}
    def __init__( self , backbone_config : Optional[Dict] = None , feature_size : int = 2_56 , mask_feature_size : int = 2_56 , hidden_dim : int = 2_56 , encoder_feedforward_dim : int = 10_24 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 20_48 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 2_55 , num_queries : int = 1_00 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 1_25_44 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs , ) -> str:
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
            backbone_config = CONFIG_MAPPING['''swin'''](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                f'''Supported model types: {','.join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config : PretrainedConfig , **kwargs ) -> List[Any]:
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
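# A minimal usage sketch (assumes the public `transformers` API this obfuscated
# config corresponds to; the backbone and query count below are illustrative):
#
# from transformers import Mask2FormerConfig, SwinConfig
# backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
# config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=100)
# serialized = config.to_dict()  # includes the nested backbone config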
| 270 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list , train_usr: list , train_mtch: list , test_dt: list , test_mtch: list ) -> float:
    '''simple docstring'''
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    # prediction = beta0 + beta1 * date + beta2 * match (the original `+ beta[2]` was a typo)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def sarimax_predictor(train_user: list , train_match: list , test_match: list ) -> float:
    '''simple docstring'''
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train: list , x_test: list , train_user: list ) -> float:
    '''simple docstring'''
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker(train_user: list ) -> float:
    '''simple docstring'''
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list , actual_result: float ) -> bool:
    '''simple docstring'''
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
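# Worked example of the vote above: one forecast overshoots the actual value
# (counted as not safe) while two land within the 0.1 tolerance, so the data is
# judged safe (2 votes > 1). Values are chosen to avoid float round-off exactly
# at the 0.1 boundary.
assert data_safety_checker([1.95, 1.98, 4.0], 2.0) is True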
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['''total_user''', '''total_even''', '''days''']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data (tst_user holds a single element, so pass the scalar)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 359 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict:
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
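# Worked example: printable bytes map to themselves while unprintable ones get
# shifted into unused code points, so the mapping stays invertible and never
# emits whitespace/control characters:
#
# b2u = bytes_to_unicode()
# assert b2u[ord("A")] == "A" and b2u[0] == chr(256) and len(b2u) == 256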
def get_pairs(word ) -> Dict:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
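    # A small trace of the merge loop above, with hypothetical ranks
    # bpe_ranks = {("h", "e"): 0, ("he", "l"): 1}: bpe("hell") rewrites
    # ("h", "e", "l", "l") -> ("he", "l", "l") -> ("hel", "l") and, with no
    # further ranked pairs, returns the space-joined string "hel l".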
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
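# A minimal sketch of the padding behaviour implemented in `_pad` above
# (illustrative values; `-1` marks padded positions because `0` already means
# "local attention" for LED):
#
# encoded = {"input_ids": [0, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0]}
# right-padding the mask to the input length yields
# {"input_ids": [0, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0, -1, -1]}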
| 174 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    """simple docstring"""
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"""Building TensorFlow model from configuration: {config}""" )
    tf_model = model_class(config )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network
        state_dict = torch.load(pytorch_checkpoint_path , map_location="""cpu""" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(f"""Max absolute difference between models outputs {diff}""" )
        assert diff <= 2e-2, f"""Error, model absolute difference is >2e-2: {diff}"""
    # Save pytorch-model
    print(f"""Save TensorFlow model to {tf_dump_path}""" )
    tf_model.save_weights(tf_dump_path , save_format="""h5""" )
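# A hypothetical command line for the converter above (flag names come from the
# argparse definitions at the bottom of this file; the script name and paths are
# placeholders):
#
# python convert_pytorch_checkpoint_to_tf2.py \
#     --model_type bert \
#     --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#     --config_file ./bert/config.json \
#     --tf_dump_path ./bert-tf_model.h5 \
#     --compare_with_pt_model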
def convert_all_pt_checkpoints_to_tf(
    args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    """simple docstring"""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types , start=1 ):
        print("""=""" * 100 )
        print(f""" Converting model type {j}/{len(model_types )}: {model_type}""" )
        print("""=""" * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
            print("""-""" * 100 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
                continue
            print(
                f""" Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}""" )
            print("""-""" * 100 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = """converted_model"""
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(model_file )
                os.remove(config_file )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 37 | """simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens(code: str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold: float = 0.8_5 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element: Optional[Any] ) -> Tuple:
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset] ) -> Optional[Any]:
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash ,ThreadedIterator(dataset_iterator ,max_queue_size=10000 ) ,chunksize=100 ,):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset] ,jaccard_threshold: float ) -> List[str]:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) ,max_queue_size=100 ) ):
        di.add(filename ,min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str ,code2: str ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
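# Worked example of the token-level Jaccard metric above (pure helpers, cheap
# to check at import time): 3 shared tokens out of 5 distinct ones gives 0.6.
assert get_tokens("def add(a, b)") == {"def", "add", "a", "b"}
assert jaccard_similarity("def add(a, b)", "def add(a, c)") == 3 / 5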
_shared_dataset = None
def _find_cluster_extremes_shared(cluster: int ,jaccard_threshold: List[Any] ) -> Dict:
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 ,code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes(cluster_list: List[Any] ,dataset: Optional[Any] ,jaccard_threshold: Any ) -> str:
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared ,jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f ,cluster_list ,) ,total=len(cluster_list ) ,):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset] ,jaccard_threshold: float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset ,jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters ,dataset ,jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x ,idx : idx not in remove_indices ,with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f"Original dataset size: {len(dataset )}" )
    print(f"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(f"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(f"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(f"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
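# A minimal end-to-end sketch (toy data; assumes the "content"/"repo_name"/"path"
# columns used above, and note that files with fewer than MIN_NUM_TOKENS tokens
# are skipped by `get_min_hash`, so real inputs should be longer than these):
#
# from datasets import Dataset
# ds = Dataset.from_dict({
#     "content": ["def add(a, b):\n    return a + b"] * 2 + ["print('hi')"],
#     "repo_name": ["r1", "r2", "r3"],
#     "path": ["a.py", "b.py", "c.py"],
# })
# ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)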
| 44 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    '''simple docstring'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
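# A minimal shape-level usage sketch of the adaptive softmax above (illustrative
# hyper-parameters; labels are kept inside the head cluster so the check stays
# deterministic; run this file directly to execute it):
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=10000 , d_embed=64 , d_proj=64 , cutoffs=[1000, 5000] , div_val=1 )
    hidden = torch.randn(2 , 8 , 64 )            # (batch, seq_len, d_proj)
    labels = torch.randint(0 , 1000 , (2, 8) )   # next-token targets
    nll = crit(hidden , labels )                 # per-token negative log-likelihoods
    assert nll.shape == (2 * (8 - 1),)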
| 352 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 260 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str ):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , """transformers.models""" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , """__name__""" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
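# For example (assuming the mapping above and a full `transformers` install),
# resolving the name of the ViT processor imports `transformers.models.vit` and
# returns its `ViTImageProcessor` class:
#
# cls = image_processor_class_from_name("ViTImageProcessor")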
def get_image_processor_config(pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the image processor configuration file, will try to use the model config instead.""" )
        return {}
    with open(resolved_config_file , encoding="""utf-8""" ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    """simple docstring"""
    def __init__( self ) -> Optional[Any]:
        raise EnvironmentError(
            """AutoImageProcessor is designed to be instantiated """
            """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> Dict:
        config = kwargs.pop("""config""" , None )
        trust_remote_code = kwargs.pop("""trust_remote_code""" , None )
        kwargs["""_from_auto"""] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get("""image_processor_type""" , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
            image_processor_auto_map = config_dict["""auto_map"""]["""AutoImageProcessor"""]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("""feature_extractor_type""" , None )
            if feature_extractor_class is not None:
                logger.warning(
                    """Could not find image processor class in the image processor config or the model config. Loading"""
                    """ based on pattern matching with the model's feature extractor configuration.""" )
                image_processor_class = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
            if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
                feature_extractor_auto_map = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
                image_processor_auto_map = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
                logger.warning(
                    """Could not find image processor auto map in the image processor config or the model config."""
                    """ Loading based on pattern matching with the model's feature extractor configuration.""" )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , """image_processor_type""" , None )
            if hasattr(config , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["""AutoImageProcessor"""]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("""code_revision""" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register(config_class , image_processor_class ) -> str:
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
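# A minimal usage sketch of the Auto API defined above (the checkpoint name is
# just an example of a repo that ships an image processor config):
#
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# inputs = image_processor(images=image, return_tensors="pt")  # `image` is a PIL image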
| 248 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key , offset , original_name , new_name ):
    '''simple docstring'''
    to_find = original_name.split(""".""" )[0]
    key_list = key.split(""".""" )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
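# Worked example with a hypothetical key: block 2 / layer 0 around "mlp.fc1"
# is shifted down by an offset of 1 and renamed:
assert (
    replace_key_with_offset("2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "block.1.0.output.conv1.weight"
)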
def rename_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("""network""" ):
            key = key.replace("""network""" , """poolformer.encoder""" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("""bias""" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("""proj""" )]
            key = key.replace(to_replace , f'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace("""proj""" , """projection""" )
            if key.endswith("""bias""" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = """poolformer.encoder.""" + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """mlp.fc1""" , """output.conv1""" )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """mlp.fc2""" , """output.conv2""" )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """norm1""" , """before_norm""" )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """norm2""" , """after_norm""" )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """layer_scale_1""" , """layer_scale_1""" )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """layer_scale_2""" , """layer_scale_2""" )
        if "head" in key:
            key = key.replace("""head""" , """classifier""" )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    # We verify our conversion on a COCO image of cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer weights into the HuggingFace structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
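# Example invocation, assuming a locally downloaded PoolFormer checkpoint;
# the script, file, and folder names below are illustrative placeholders:
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12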
| 248 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 357 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 218 | 0 |
"""
Sum of all numbers below one million that can be written as the sum of the
fifth powers of their digits.
"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
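# A small sanity check of the digit-power sum, using the fact that
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150:
assert digits_fifth_powers_sum(4150) == 4150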
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 4 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour of the current vertex already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
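# A minimal driver for the backtracking colorer; the triangle graph below
# (given as an adjacency matrix) is an illustrative assumption:
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # three colors suffice, e.g. [0, 1, 2]
    print(color(triangle, 2))  # []: a triangle is not 2-colorable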
| 4 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 239 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Prints the path through ``maze`` (if one exists) and returns whether it was solved."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step: tries to extend the path at cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
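# A small driver, assuming the illustrative 3x3 maze below (0 = open cell, 1 = wall):
if __name__ == "__main__":
    example_maze = [[0, 1, 0], [0, 1, 0], [0, 0, 0]]
    solve_maze(example_maze)  # prints the visited-cell matrix and returns True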
| 104 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 362 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 63 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
_a = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_a = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
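# A minimal usage sketch for the reader tokenizer; the checkpoint name and the
# question/passage strings are illustrative assumptions:
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )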
| 61 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 91 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )

| 31 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }

| 31 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 184 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def f(y):
        return y**2 * y**4
    print(differentiate(f, 9, 2))
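    # Illustrative sanity check (an added sketch; it assumes the Dual class and
    # differentiate() defined above): d/dy y**3 at y = 2 is 3 * 2**2 = 12, and the
    # second derivative there is 6 * 2 = 12.
    assert differentiate(lambda y: y**3, 2, 1) == 12
    assert differentiate(lambda y: y**3, 2, 2) == 12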
| 158 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_rescale = True, rescale_factor = 1 / 255, do_pad = True, pad_size = 8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image, scale, data_format = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image, size, data_format = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format)
    def preprocess(self, images, do_rescale = None, rescale_factor = None, do_pad = None, pad_size = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 295 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner( self , weights: list[list[float]] , sample: list[int] ) -> int:
        """simple docstring"""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights: list[list[int | float]] , sample: list[int] , j: int , alpha: float ) -> list[list[int | float]]:
        """simple docstring"""
        for i in range(len(weights) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    '''simple docstring'''
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 141 | 0 |
from typing import Any
import numpy as np
def is_hermitian( matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 119 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 119 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 121 |
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path( graph: dict , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph: dict , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
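    # Added note (an optimization sketch, not part of the row above): list.pop(0)
    # is O(n), while collections.deque gives O(1) pops from the left for the same
    # traversal.
    from collections import deque
    def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
        if start == goal:
            return [start]
        explored = set()
        queue = deque([[start]])
        while queue:
            path = queue.popleft()
            node = path[-1]
            if node not in explored:
                for neighbour in graph[node]:
                    new_path = [*path, neighbour]
                    if neighbour == goal:
                        return new_path
                    queue.append(new_path)
                explored.add(node)
        return []
    print(bfs_shortest_path_deque(demo_graph, 'G', 'D'))  # same result: ['G', 'C', 'A', 'B', 'D']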
| 121 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
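    # Illustrative addition (not part of the row above): the same traversal written
    # recursively; it visits the same vertex set as the iterative version.
    def depth_first_search_recursive(graph: dict, start: str, explored: set | None = None) -> set:
        if explored is None:
            explored = set()
        explored.add(start)
        for adj in graph[start]:
            if adj not in explored:
                depth_first_search_recursive(graph, adj, explored)
        return explored
    assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")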
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    '''simple docstring'''
    def __init__( self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('Protein models do not support embedding resizing.' )
    def test_resize_token_embeddings( self ):
        pass
@unittest.skip('Protein models do not support embedding resizing.' )
    def test_save_load_after_resize_token_embeddings( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
    def test_inference_no_head( self ):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 315 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : T ):
_A = data
_A = None
def __str__( self : str ):
return F'''{self.data}'''
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
_A = None
def __iter__( self : List[Any] ):
_A = self.top
while node:
yield node.data
_A = node.next
def __str__( self : Union[str, Any] ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self : List[Any] ):
return len(tuple(iter(self ) ) )
def lowerCAmelCase_ ( self : str ):
return self.top is None
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ):
_A = Node(_UpperCAmelCase )
if not self.is_empty():
_A = self.top
_A = node
def lowerCAmelCase_ ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
_A = self.top
_A = self.top.next
return pop_node.data
def lowerCAmelCase_ ( self : Tuple ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCAmelCase__ = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCAmelCase__ = {
'jukebox': 512,
}
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_LYRIC_TOKENS_SIZES
UpperCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Any , lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : int , lowercase__ : Optional[int]=["v3", "v2", "v2"] , lowercase__ : int=512 , lowercase__ : Optional[int]=5 , lowercase__ : List[Any]="<|endoftext|>" , **lowercase__ : List[Any] , ):
'''simple docstring'''
lowerCAmelCase__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else unk_token
super().__init__(
unk_token=lowercase__ , n_genres=lowercase__ , version=lowercase__ , max_n_lyric_tokens=lowercase__ , **lowercase__ , )
lowerCAmelCase__ = version
lowerCAmelCase__ = max_n_lyric_tokens
lowerCAmelCase__ = n_genres
with open(lowercase__ , encoding='utf-8') as vocab_handle:
lowerCAmelCase__ = json.load(lowercase__)
with open(lowercase__ , encoding='utf-8') as vocab_handle:
lowerCAmelCase__ = json.load(lowercase__)
with open(lowercase__ , encoding='utf-8') as vocab_handle:
lowerCAmelCase__ = json.load(lowercase__)
lowerCAmelCase__ = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 79:
lowerCAmelCase__ = oov.replace(r'\-\'' , r'\-+\'')
lowerCAmelCase__ = regex.compile(lowercase__)
lowerCAmelCase__ = {v: k for k, v in self.artists_encoder.items()}
lowerCAmelCase__ = {v: k for k, v in self.genres_encoder.items()}
lowerCAmelCase__ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __snake_case ( self : List[str]):
'''simple docstring'''
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def __snake_case ( self : List[Any]):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)
def __snake_case ( self : str , lowercase__ : Dict , lowercase__ : int , lowercase__ : Tuple):
'''simple docstring'''
lowerCAmelCase__ = [self.artists_encoder.get(lowercase__ , 0) for artist in list_artists]
for genres in range(len(lowercase__)):
lowerCAmelCase__ = [self.genres_encoder.get(lowercase__ , 0) for genre in list_genres[genres]]
lowerCAmelCase__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
lowerCAmelCase__ = [[self.lyrics_encoder.get(lowercase__ , 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __snake_case ( self : int , lowercase__ : List[Any]):
'''simple docstring'''
return list(lowercase__)
def __snake_case ( self : Dict , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Dict , **lowercase__ : List[Any]):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_for_tokenization(lowercase__ , lowercase__ , lowercase__)
lowerCAmelCase__ = self._tokenize(lowercase__)
return artist, genre, lyrics
def __snake_case ( self : Union[str, Any] , lowercase__ : str , lowercase__ : str , lowercase__ : str , lowercase__ : bool = False):
'''simple docstring'''
for idx in range(len(self.version)):
if self.version[idx] == "v3":
lowerCAmelCase__ = artists[idx].lower()
lowerCAmelCase__ = [genres[idx].lower()]
else:
lowerCAmelCase__ = self._normalize(artists[idx]) + '.v2'
lowerCAmelCase__ = [
self._normalize(lowercase__) + '.v2' for genre in genres[idx].split('_')
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowerCAmelCase__ = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+')
lowerCAmelCase__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
lowerCAmelCase__ = {vocab[index]: index + 1 for index in range(len(lowercase__))}
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(lowercase__) + 1
lowerCAmelCase__ = self.vocab
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
lowerCAmelCase__ = ''
else:
lowerCAmelCase__ = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+')
lowerCAmelCase__ = self._run_strip_accents(lowercase__)
lowerCAmelCase__ = lyrics.replace('\\' , '\n')
lowerCAmelCase__ = self.out_of_vocab.sub('' , lowercase__), [], []
return artists, genres, lyrics
def __snake_case ( self : Any , lowercase__ : List[str]):
'''simple docstring'''
lowerCAmelCase__ = unicodedata.normalize('NFD' , lowercase__)
lowerCAmelCase__ = []
for char in text:
lowerCAmelCase__ = unicodedata.category(lowercase__)
if cat == "Mn":
continue
output.append(lowercase__)
return "".join(lowercase__)
def __snake_case ( self : Optional[int] , lowercase__ : str):
'''simple docstring'''
lowerCAmelCase__ = (
[chr(lowercase__) for i in range(ord('a') , ord('z') + 1)]
+ [chr(lowercase__) for i in range(ord('A') , ord('Z') + 1)]
+ [chr(lowercase__) for i in range(ord('0') , ord('9') + 1)]
+ ['.']
)
lowerCAmelCase__ = frozenset(lowercase__)
lowerCAmelCase__ = re.compile(r'_+')
lowerCAmelCase__ = ''.join([c if c in accepted else '_' for c in text.lower()])
lowerCAmelCase__ = pattern.sub('_' , lowercase__).strip('_')
return text
def __snake_case ( self : int , lowercase__ : List[str]):
'''simple docstring'''
return " ".join(lowercase__)
def __snake_case ( self : Optional[int] , lowercase__ : List[str] , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : bool = False):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = TensorType(lowercase__)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
import tensorflow as tf
lowerCAmelCase__ = tf.constant
lowerCAmelCase__ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
import torch
lowerCAmelCase__ = torch.tensor
lowerCAmelCase__ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
import jax.numpy as jnp # noqa: F811
lowerCAmelCase__ = jnp.array
lowerCAmelCase__ = _is_jax
else:
lowerCAmelCase__ = np.asarray
lowerCAmelCase__ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowerCAmelCase__ = [inputs]
if not is_tensor(lowercase__):
lowerCAmelCase__ = as_tensor(lowercase__)
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.')
return inputs
def __call__( self : Tuple , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : List[Any]="" , lowercase__ : int="pt"):
'''simple docstring'''
lowerCAmelCase__ = [0, 0, 0]
lowerCAmelCase__ = [artist] * len(self.version)
lowerCAmelCase__ = [genres] * len(self.version)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.tokenize(lowercase__ , lowercase__ , lowercase__)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._convert_token_to_id(lowercase__ , lowercase__ , lowercase__)
lowerCAmelCase__ = [-INFINITY] * len(full_tokens[-1])
lowerCAmelCase__ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowercase__)
for i in range(len(self.version))
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
def __snake_case ( self : Optional[int] , lowercase__ : str , lowercase__ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(lowercase__):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCAmelCase__ = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
with open(lowercase__ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowercase__))
lowerCAmelCase__ = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
with open(lowercase__ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowercase__))
lowerCAmelCase__ = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
with open(lowercase__ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowercase__))
return (artists_file, genres_file, lyrics_file)
def __snake_case ( self : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = self.artists_decoder.get(lowercase__)
lowerCAmelCase__ = [self.genres_decoder.get(lowercase__) for genre in genres_index]
lowerCAmelCase__ = [self.lyrics_decoder.get(lowercase__) for character in lyric_index]
return artist, genres, lyrics
| 119 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 42
@flax_register_to_config
class a_ ( nn.Module , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 32
UpperCAmelCase_ = 4
UpperCAmelCase_ = 4
UpperCAmelCase_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase_ = False
UpperCAmelCase_ = (320, 640, 1_280, 1_280)
UpperCAmelCase_ = 2
UpperCAmelCase_ = 8
UpperCAmelCase_ = None
UpperCAmelCase_ = 1_280
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = False
UpperCAmelCase_ = jnp.floataa
UpperCAmelCase_ = True
UpperCAmelCase_ = 0
UpperCAmelCase_ = False
def __snake_case ( self : Optional[int] , lowercase__ : jax.random.KeyArray):
'''simple docstring'''
lowerCAmelCase__ = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase__ = jnp.zeros(lowercase__ , dtype=jnp.floataa)
lowerCAmelCase__ = jnp.ones((1,) , dtype=jnp.intaa)
lowerCAmelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
lowerCAmelCase__ , lowerCAmelCase__ = jax.random.split(lowercase__)
lowerCAmelCase__ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase__ , lowercase__ , lowercase__ , lowercase__)["params"]
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.block_out_channels
lowerCAmelCase__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.')
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase__ = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
lowerCAmelCase__ = FlaxTimestepEmbedding(lowercase__ , dtype=self.dtype)
lowerCAmelCase__ = self.only_cross_attention
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = (only_cross_attention,) * len(self.down_block_types)
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = (num_attention_heads,) * len(self.down_block_types)
# down
lowerCAmelCase__ = []
lowerCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
lowerCAmelCase__ = i == len(lowercase__) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase__ = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxDownBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase__)
lowerCAmelCase__ = down_blocks
# mid
lowerCAmelCase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase__ = []
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = reversed_block_out_channels[i]
lowerCAmelCase__ = reversed_block_out_channels[min(i + 1 , len(lowercase__) - 1)]
lowerCAmelCase__ = i == len(lowercase__) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase__ = FlaxCrossAttnUpBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxUpBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase__)
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = up_blocks
# out
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : bool = True , lowercase__ : bool = False , ):
'''simple docstring'''
if not isinstance(lowercase__ , jnp.ndarray):
lowerCAmelCase__ = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(lowercase__ , jnp.ndarray) and len(timesteps.shape) == 0:
lowerCAmelCase__ = timesteps.astype(dtype=jnp.floataa)
lowerCAmelCase__ = jnp.expand_dims(lowercase__ , 0)
lowerCAmelCase__ = self.time_proj(lowercase__)
lowerCAmelCase__ = self.time_embedding(lowercase__)
# 2. pre-process
lowerCAmelCase__ = jnp.transpose(lowercase__ , (0, 2, 3, 1))
lowerCAmelCase__ = self.conv_in(lowercase__)
# 3. down
lowerCAmelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ , lowerCAmelCase__ = down_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train)
else:
lowerCAmelCase__ , lowerCAmelCase__ = down_block(lowercase__ , lowercase__ , deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase__ , lowercase__):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase__ = new_down_block_res_samples
# 4. mid
lowerCAmelCase__ = self.mid_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase__ = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = up_block(
lowercase__ , temb=lowercase__ , encoder_hidden_states=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train , )
else:
lowerCAmelCase__ = up_block(lowercase__ , temb=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train)
# 6. post-process
lowerCAmelCase__ = self.conv_norm_out(lowercase__)
lowerCAmelCase__ = nn.silu(lowercase__)
lowerCAmelCase__ = self.conv_out(lowercase__)
lowerCAmelCase__ = jnp.transpose(lowercase__ , (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase__)
| 119 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "pix2struct_text_model"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, lowerCAmelCase__=5_0244, lowerCAmelCase__=768, lowerCAmelCase__=64, lowerCAmelCase__=2048, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=32, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=1e-6, lowerCAmelCase__=1.0, lowerCAmelCase__="gelu_new", lowerCAmelCase__=0, lowerCAmelCase__=False, lowerCAmelCase__=0, lowerCAmelCase__=1, lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Optional[int]:
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = d_kv
snake_case_ = d_ff
snake_case_ = num_layers
snake_case_ = num_heads
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_factor
snake_case_ = use_cache
snake_case_ = eos_token_id
snake_case_ = decoder_start_token_id
# for backwards compatibility
snake_case_ = dense_act_fn
super().__init__(
pad_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, decoder_start_token_id=lowerCAmelCase__, tie_word_embeddings=lowerCAmelCase__, is_decoder=lowerCAmelCase__, **lowerCAmelCase__, )
@classmethod
def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase__)
snake_case_ , snake_case_ = cls.get_config_dict(lowerCAmelCase__, **lowerCAmelCase__)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
snake_case_ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowerCAmelCase__, **lowerCAmelCase__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "pix2struct_vision_model"
def __init__( self, lowerCAmelCase__=768, lowerCAmelCase__=768, lowerCAmelCase__=2048, lowerCAmelCase__=64, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__="gelu_new", lowerCAmelCase__=1e-6, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=1e-10, lowerCAmelCase__=1.0, lowerCAmelCase__=4096, lowerCAmelCase__=32, lowerCAmelCase__=128, **lowerCAmelCase__, ) -> List[Any]:
super().__init__(**lowerCAmelCase__)
snake_case_ = hidden_size
snake_case_ = patch_embed_hidden_size
snake_case_ = d_ff
snake_case_ = dropout_rate
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = initializer_range
snake_case_ = initializer_factor
snake_case_ = attention_dropout
snake_case_ = layer_norm_eps
snake_case_ = dense_act_fn
snake_case_ = seq_len
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = d_kv
@classmethod
def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase__)
snake_case_ , snake_case_ = cls.get_config_dict(lowerCAmelCase__, **lowerCAmelCase__)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
snake_case_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowerCAmelCase__, **lowerCAmelCase__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "pix2struct"
SCREAMING_SNAKE_CASE_ = True
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=1.0, lowerCAmelCase__=0.02, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]:
super().__init__(tie_word_embeddings=lowerCAmelCase__, is_encoder_decoder=lowerCAmelCase__, **lowerCAmelCase__)
if text_config is None:
snake_case_ = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
if vision_config is None:
snake_case_ = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
snake_case_ = PixaStructTextConfig(**lowerCAmelCase__)
snake_case_ = PixaStructVisionConfig(**lowerCAmelCase__)
snake_case_ = self.text_config.decoder_start_token_id
snake_case_ = self.text_config.pad_token_id
snake_case_ = self.text_config.eos_token_id
snake_case_ = initializer_factor
snake_case_ = initializer_range
snake_case_ = self.initializer_range
snake_case_ = self.initializer_range
snake_case_ = is_vqa
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = copy.deepcopy(self.__dict__)
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 69 | """simple docstring"""
from math import factorial
def solution(n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
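    # Illustrative cross-check (an addition, assuming the solution() above): it
    # computes the central binomial coefficient C(2n, n), so it must agree with
    # math.comb on Python 3.8+.
    import math
    assert solution(20) == math.comb(40, 20) == 137846528820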
| 69 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000 ) -> int:
    """simple docstring"""
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2 ):
            if gcd(euclid_m, euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase , _UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase = controlnet_params
_UpperCamelCase = '''bird'''
_UpperCamelCase = jax.device_count()
_UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
_UpperCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = jax.random.split(lowerCAmelCase__ , jax.device_count() )
_UpperCamelCase = replicate(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=50 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase = images[0, 253:256, 253:256, -1]
_UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase , _UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase = controlnet_params
_UpperCamelCase = '''Chef in the kitchen'''
_UpperCamelCase = jax.device_count()
_UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
_UpperCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = jax.random.split(lowerCAmelCase__ , jax.device_count() )
_UpperCamelCase = replicate(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=50 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase = images[0, 253:256, 253:256, -1]
_UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
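def _demo_replicate_and_shard() -> None:
    # A minimal sketch (assuming jax and flax are installed, single host) of
    # the replicate/shard pattern the tests above rely on: parameters get a
    # leading device axis, while the batch is split across devices before a
    # pmapped call.
    params = {'w': jnp.ones((3,) )}
    batch = jnp.ones((jax.device_count() * 2, 3) )
    replicated_params = replicate(params )
    sharded_batch = shard(batch )
    assert sharded_batch.shape == (jax.device_count(), 2, 3)
    assert replicated_params['w'].shape == (jax.device_count(), 3)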
| 287 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
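    # Quick check of the mass-action relation n * p = n_i**2 implemented
    # above: with n = 25 and p = 100 known (the unknown passed as 0), the
    # intrinsic concentration comes out as 50.
    print(snake_case_(electron_conc=25 , hole_conc=100 , intrinsic_conc=0 ))
    # ('intrinsic_conc', 50.0)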
| 93 | """simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( nums :list[int] ) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
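    # Sanity check of the DP above: for [2, 7, 9, 3, 1] the best non-adjacent
    # pick is 2 + 9 + 1 = 12.
    print(UpperCAmelCase__([2, 7, 9, 3, 1]))  # 12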
| 197 | 0 |
def _print_dist( dist , v ):
    """simple docstring"""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall( graph , v ):
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
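def _demo_floyd_warshall() -> None:
    # Non-interactive sketch mirroring the commented example at the bottom:
    # edges 1 -> 2 (weight 2) and 2 -> 1 (weight 1) on three vertices.
    inf = float("inf")
    example = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    floyd_warshall(example, 3)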
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 348 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
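def _demo_rename_key(name: str = "transformer.layers.0.linear1.weight") -> str:
    # Hypothetical spot check mirroring the renaming above: a fairseq decoder
    # key maps onto the Hugging Face layout, e.g.
    # "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight".
    return name.replace('transformer' , 'model.decoder' ).replace('linear1' , 'fc1' )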
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__snake_case =[
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCAmelCase_ ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
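def _demo_config_dedup() -> None:
    # Standalone illustration of the set-comprehension dedup used in the
    # `else` branch of list_datasets_on_hf_gcp_parameters above: each dataset
    # name is kept once even when several configs exist for it.
    rows = [
        {"dataset": "wikipedia", "config_name": "20220301.de"},
        {"dataset": "wikipedia", "config_name": "20220301.en"},
        {"dataset": "snli", "config_name": "plain_text"},
    ]
    print(sorted({row["dataset"] for row in rows} ))  # ['snli', 'wikipedia']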
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals( numerals : str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ) -> str:
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution( roman_numerals_filename : str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
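    # Round-trip sanity check for the two converters above:
    print(parse_roman_numerals("MCMXC"))  # 1990
    print(generate_roman_numerals(1990))  # MCMXC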
| 4 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]=7):
lowercase__ : Any = None
if token is not None:
lowercase__ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ : Tuple = "636036"
lowercase__ : int = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ : List[str] = requests.get(_lowerCamelCase , headers=_lowerCamelCase).json()
return result["workflow_runs"]
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : Tuple = get_daily_ci_runs(_lowerCamelCase)
lowercase__ : List[str] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ : List[str] = workflow_run["id"]
break
return workflow_run_id
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Tuple):
lowercase__ : Tuple = get_last_daily_ci_runs(_lowerCamelCase)
if workflow_run_id is not None:
lowercase__ : Optional[Any] = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase)
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ : List[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : str):
get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
lowercase__ : Optional[int] = {}
for artifact_name in artifact_names:
lowercase__ : str = os.path.join(_lowerCamelCase , f'''{artifact_name}.zip''')
if os.path.isfile(_lowerCamelCase):
lowercase__ : Optional[int] = {}
with zipfile.ZipFile(_lowerCamelCase) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase):
# read the file
with z.open(_lowerCamelCase) as f:
lowercase__ : Dict = f.read().decode("UTF-8")
return results
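def _demo_zip_roundtrip() -> None:
    # Self-contained sketch of the artifact-reading loop above, using an
    # in-memory zip in place of a downloaded CI artifact.
    import io
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as z:
        z.writestr("stats.txt", "ok")
    with zipfile.ZipFile(buf) as z:
        for filename in z.namelist():
            with z.open(filename) as f:
                print(filename, f.read().decode("UTF-8"))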
| 351 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
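def _demo_downsampling_factor() -> int:
    # The property above multiplies the conv strides together; with the
    # default strides (5, 2, 2, 2, 2, 2, 2) the overall audio-to-logits
    # downsampling factor is 5 * 2**6 = 320.
    return functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1 )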
| 333 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
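# Minimal standalone analogue (hypothetical names, no transformers required)
# of the lazy-import trick above: names listed in a structure are only
# imported from their submodule on first attribute access.
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure
    def __getattr__(self, item):
        for submodule, names in self._structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(item)
lazy_math = TinyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(16))  # the real `math` module is imported only here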
| 66 |
"""simple docstring"""
from math import factorial
class Dual:
    '''simple docstring'''
    def __init__( self , real , rank ):
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        return (
            f"""{self.real}+"""
            f"""{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
    def reduce( self ):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        return self + other * -1
    def __mul__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
            raise ValueError("""power must be a positive integer""" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate( func , position , order ):
    '''simple docstring'''
    if not callable(func ):
        raise ValueError("""differentiate() requires a function as input for func""" )
    if not isinstance(position , (float, int) ):
        raise ValueError("""differentiate() requires a float as input for position""" )
    if not isinstance(order , int ):
        raise ValueError("""differentiate() requires an int as input for order""" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def f( y ):
        '''simple docstring'''
        return y**2 * y**4
    print(differentiate(f, 9, 2))
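    # Extra check: the first derivative of x**3 at x = 2 is 3 * 2**2 = 12.
    print(differentiate(lambda x: x ** 3, 2, 1))  # 12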
| 66 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ : Optional[int] =get_logger(__name__)
def __lowercase ( a__ , a__ , a__ , a__ , a__=0 ) -> int:
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__SCREAMING_SNAKE_CASE = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__SCREAMING_SNAKE_CASE = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(f"""Saving model to {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=a__ , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def __lowercase ( a__ , a__ , a__ , a__ , a__=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
__SCREAMING_SNAKE_CASE = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading model from {input_model_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading model from {input_model_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__SCREAMING_SNAKE_CASE = (
os.path.join(a__ , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=a__ , storage_reader=dist_cp.FileSystemReader(a__ ) , planner=DefaultLoadPlanner() , )
__SCREAMING_SNAKE_CASE = state_dict['model']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(a__ )
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__=0 ) -> List[Any]:
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__SCREAMING_SNAKE_CASE = FSDP.optim_state_dict(a__ , a__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__SCREAMING_SNAKE_CASE = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(a__ , a__ )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
__SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__=0 ) -> str:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__SCREAMING_SNAKE_CASE = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__SCREAMING_SNAKE_CASE = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__SCREAMING_SNAKE_CASE = os.path.join(a__ , a__ )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(a__ )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__SCREAMING_SNAKE_CASE = (
os.path.join(a__ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(a__ ) , )
__SCREAMING_SNAKE_CASE = optim_state['optimizer']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
__SCREAMING_SNAKE_CASE = FSDP.optim_state_dict_to_load(a__ , a__ , a__ )
optimizer.load_state_dict(a__ )
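def _demo_checkpoint_names(model_index: int = 1, process_index: int = 3) -> None:
    # Hedged illustration of the checkpoint file-naming scheme the save/load
    # helpers above share for FULL_STATE_DICT vs LOCAL_STATE_DICT; only the
    # imported MODEL_NAME constant is used, no torch state is touched.
    full = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
    local = (
        f"""{MODEL_NAME}_rank{process_index}.bin"""
        if model_index == 0
        else f"""{MODEL_NAME}_{model_index}_rank{process_index}.bin"""
    )
    print(full, local)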
| 118 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__)
def __lowercase ( a__ , a__=False ) -> Tuple:
__SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __lowercase ( a__ , a__ , a__=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
__SCREAMING_SNAKE_CASE = ''
else:
__SCREAMING_SNAKE_CASE = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
__SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
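def _demo_qkv_split(hidden_size: int = 4) -> None:
    # Minimal sketch (hypothetical helper) of the fused-qkv split performed in
    # read_in_q_k_v above: one (3h, h) projection matrix splits into separate
    # q, k and v weight blocks.
    w = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32 ).reshape(3 * hidden_size, hidden_size )
    q = w[: hidden_size, :]
    k = w[hidden_size : hidden_size * 2, :]
    v = w[-hidden_size :, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)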
def __lowercase ( a__ , a__ , a__ ) -> str:
__SCREAMING_SNAKE_CASE = dct.pop(a__ )
__SCREAMING_SNAKE_CASE = val
def __lowercase ( ) -> List[Any]:
__SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def __lowercase ( a__ , a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = DeiTConfig()
# all deit models have fine-tuned heads
__SCREAMING_SNAKE_CASE = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = 'huggingface/label-files'
__SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
__SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = int(deit_name[-6:-4] )
__SCREAMING_SNAKE_CASE = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
__SCREAMING_SNAKE_CASE = 1_92
__SCREAMING_SNAKE_CASE = 7_68
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
elif deit_name[9:].startswith('small' ):
__SCREAMING_SNAKE_CASE = 3_84
__SCREAMING_SNAKE_CASE = 15_36
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
__SCREAMING_SNAKE_CASE = 10_24
__SCREAMING_SNAKE_CASE = 40_96
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
# load original model from timm
__SCREAMING_SNAKE_CASE = timm.create_model(a__ , pretrained=a__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__SCREAMING_SNAKE_CASE = timm_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(a__ , a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , a__ )
# load HuggingFace model
__SCREAMING_SNAKE_CASE = DeiTForImageClassificationWithTeacher(a__ ).eval()
model.load_state_dict(a__ )
# Check outputs on an image, prepared by DeiTImageProcessor
__SCREAMING_SNAKE_CASE = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=a__ , crop_size=config.image_size )
__SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors='pt' )
__SCREAMING_SNAKE_CASE = encoding['pixel_values']
__SCREAMING_SNAKE_CASE = model(a__ )
__SCREAMING_SNAKE_CASE = timm_model(a__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a__ , outputs.logits , atol=1E-3 )
Path(a__ ).mkdir(exist_ok=a__ )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ : str =parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 118 | 1 |