| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse

import fairseq
import torch
from torch import nn

from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path, e.g. "encoder.layers.0.attention.k_proj"
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                logger.info(f"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 501 |
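A usage sketch for `convert_wav2vec2_checkpoint` above, calling it programmatically rather than via the CLI. All file paths are placeholders you must supply; the keyword values simply mirror the argparse defaults:

```python
# Hedged sketch, not a verified command: all paths below are placeholders.
convert_wav2vec2_checkpoint(
    "checkpoint_best.pt",    # fairseq speech-to-text checkpoint (placeholder)
    "./hf_model",            # output folder for the converted HF model
    "dict.mbart50.txt",      # fairseq target dictionary (placeholder)
    "config.yaml",           # fairseq task config (placeholder)
    encoder_config_path="facebook/wav2vec2-xls-r-1b",
    decoder_config_path="facebook/mbart-large-50-one-to-many-mmt",
    add_adapter=True,
    adapter_kernel_size=3,
    adapter_stride=2,
    decoder_start_token_id=250004,
    encoder_output_dim=1024,
)
```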
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] by bisection."""
    start: float = a
    end: float = b
    if function(a) == 0:  # a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither bound is a root and f(a), f(b) have the same sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracketing interval is below 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
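A minimal sanity check of the bisection routine above. The real root of x³ − 2x − 5 is ≈ 2.0945515, and since the loop stops once the bracketing interval shrinks below 1e-7, the residual |f(root)| is tiny:

```python
root = bisection(f, 1, 1000)
assert abs(f(root)) < 1e-5  # residual ≈ f'(root) * 1e-7 ≈ 1e-6
print(f"{root:.7f}")        # prints a value close to 2.0945515
```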
| 501 | 1 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in `sentence` that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 191 |
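Worked expectation for the demo call in `reverse_long_words` above:

```python
# "Hey" has 3 characters and is kept; "wollef" (6) and "sroirraw" (8) are reversed:
print(reverse_long_words("Hey wollef sroirraw"))  # -> "Hey fellow warriors"
```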
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 191 | 1 |
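A short usage sketch for the builder above through the public `datasets` API; `data.csv` is a placeholder path. `load_dataset("csv", ...)` routes through this `Csv` builder, and extra keyword arguments are forwarded into `CsvConfig` and from there to `pd.read_csv`:

```python
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "data.csv"}, sep=",")  # placeholder file
print(ds["train"].features)
```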
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact and save it as a zip file."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 639 |
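A toy run of `reduce_by_error` from the CI-statistics script above. Each log entry has the shape `[error_line, error, failed_test, job_link]`, exactly as produced by `get_errors_from_single_artifact`; the values below are made up for illustration:

```python
logs = [
    ["test_a.py:10", "AssertionError", "tests/models/bert/test_a.py::test_x", None],
    ["test_b.py:20", "AssertionError", "tests/models/gpt2/test_b.py::test_y", None],
    ["test_c.py:30", "OSError", "tests/models/bert/test_c.py::test_z", None],
]
print(reduce_by_error(logs))
# {'AssertionError': {'count': 2, 'failed_tests': [(...), (...)]},
#  'OSError': {'count': 1, 'failed_tests': [(...)]}}
```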
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase , unittest.TestCase ):
UpperCamelCase : Any = KandinskyImgaImgPipeline
UpperCamelCase : int = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
UpperCamelCase : Optional[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
UpperCamelCase : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase : List[str] = False
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return self.time_input_dim
@property
def __snake_case ( self ):
return self.time_input_dim * 4
@property
def __snake_case ( self ):
return 100
@property
def __snake_case ( self ):
UpperCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase__ : Tuple = MultilingualCLIP(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCAmelCase__ : Optional[Any] = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def __snake_case ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase__ : int = self.dummy_tokenizer
UpperCAmelCase__ : int = self.dummy_unet
UpperCAmelCase__ : Optional[Any] = self.dummy_movq
UpperCAmelCase__ : Any = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
UpperCAmelCase__ : List[Any] = DDIMScheduler(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = 'cpu'
UpperCAmelCase__ : Any = self.get_dummy_components()
UpperCAmelCase__ : Any = self.pipeline_class(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : Dict = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Any = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
UpperCAmelCase__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
UpperCAmelCase__ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCAmelCase__ : Any = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
UpperCAmelCase__ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
UpperCAmelCase__ : Optional[Any] = pipeline(
UpperCamelCase_ , image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 110 | 0 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True
    (in which case `value` is expected to already be a sigmoid output)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so that the network output approaches `expected`."""
    # Random weight in [-1, 199]
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 40 |
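Quick checks for the helpers above: `sigmoid_function(0)` is 0.5, and with `deriv=True` the function expects an already-sigmoided value. With enough propagations the trained output lands close to the target:

```python
print(sigmoid_function(0.0))              # 0.5
print(sigmoid_function(0.5, deriv=True))  # 0.25, i.e. s * (1 - s) at s = 0.5

res = forward_propagation(32, 450_000)
print(31 < res < 33)  # True: the single weight is trained until layer_1 * 100 ≈ 32
```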
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)

        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
| 40 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
"""simple docstring"""
__snake_case = ['onnx']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
requires_backends(self , ['''onnx'''] )
@classmethod
def a__ ( cls , *_lowercase , **_lowercase ) -> Any:
requires_backends(cls , ['''onnx'''] )
@classmethod
def a__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['''onnx'''] )
| 434 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers and returns the Manhattan distance between them."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Version of `manhattan_distance` written as a single comprehension."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 559 | 0 |
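Usage example for the Manhattan-distance functions above:

```python
print(manhattan_distance([1, 1], [2, 2]))        # 2.0
print(manhattan_distance([1.5, 2], [3, -2.5]))   # 6.0, i.e. |1.5 - 3| + |2 - (-2.5)|
```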
import socket


def main():
    """A simple TCP client that receives a file from a server on the same host."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
| 702 |
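The client above expects a sender listening on the same host and port. A minimal companion server sketch (an assumption, not part of the original snippet; `File_to_send.txt` is a placeholder filename):

```python
import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((socket.gethostname(), 12312))  # must match the client's host/port
server.listen(1)
conn, addr = server.accept()
print(f"Connected to {addr}")
conn.recv(1024)  # consume the client's greeting
with open("File_to_send.txt", "rb") as in_file:  # placeholder file
    conn.sendfile(in_file)
conn.close()
server.close()
```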
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 76 | 0 |
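Thanks to the `_LazyModule` indirection above, the submodules are only imported when an attribute is first accessed. A hedged usage sketch (assumes network access and the published `google/canine-s` checkpoint):

```python
from transformers import CanineModel, CanineTokenizer  # resolved lazily by _LazyModule

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
model = CanineModel.from_pretrained("google/canine-s")
inputs = tokenizer("CANINE works on raw characters.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```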
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update a single element in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the combined value over [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        # the expected values below hold for fn = operator.add
        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 107 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase ={f"""funnel-transformer/{name}""": 512 for name in _model_names}
UpperCAmelCase ={f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """Fast Funnel Transformer tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
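# Minimal usage sketch (assumes Hub access; checkpoint id taken from the map above):
#
#   tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   enc = tok("first segment", "second segment")
#   # Funnel marks [CLS] with token type id 2 (cls_token_type_id above),
#   # the first segment with 0 and the second with 1:
#   print(enc["token_type_ids"])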
| 617 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Return the default log level, overridable via the DATASETS_VERBOSITY env var."""
    env_level_str = os.getenv('DATASETS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any requested attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
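# Minimal usage sketch of the helpers above:
#
#   logger = get_logger(__name__)   # child of the library root logger
#   set_verbosity_info()            # same as set_verbosity(log_levels["info"])
#   logger.info("now visible")
#   disable_progress_bar()          # tqdm(...) now returns no-op EmptyTqdm objects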
| 34 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
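# How these tests drive the example scripts, in sketch form: each script's
# main() reads sys.argv through argparse/HfArgumentParser, so patching
# sys.argv simulates a CLI run in-process:
#
#   testargs = ["run_glue.py", "--output_dir", tmp_dir]  # plus task flags
#   with patch.object(sys, "argv", testargs):
#       run_flax_glue.main()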
| 34 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
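# Note on the two sampling modes exercised above (a sketch): consistency models
# support one-step generation (num_inference_steps=1, timesteps=None) and
# multistep refinement via an explicit, descending timestep list such as
# [22, 0]; the tests pin both paths with fixed seeds and reference slices.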
| 426 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    """Configuration class for an XGLM model."""

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
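# Minimal usage sketch: instantiating a scaled-down config for experiments
# (parameter values here are illustrative, not a released checkpoint):
#
#   config = XGLMConfig(num_layers=4, d_model=256, attention_heads=4)
#   config.hidden_size   # -> 256, resolved to d_model through attribute_map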
| 426 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
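# Worked check of the demo call above: for 5x^2 + 6x + 1 = 0 the discriminant
# is b^2 - 4ac = 36 - 20 = 16, so the roots are (-6 +/- 4) / 10, i.e. -0.2
# and -1.0 -- both real, so quadratic_roots returns plain floats. A complex
# case for contrast:
#
#   quadratic_roots(a=1, b=0, c=1)   # -> (1j, -1j), since delta = -4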
| 337 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)

    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
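# Shape of the GitHub API response this script relies on (abridged sketch;
# runner names are illustrative):
#
#   {"total_count": 2,
#    "runners": [{"id": 1, "name": "gpu-runner-1", "status": "online", ...},
#                {"id": 2, "name": "gpu-runner-2", "status": "offline", ...}]}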
| 337 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
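# The registration pattern these tests exercise, as a standalone sketch
# (class names here are illustrative, not real library classes):
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyFeatureExtractor(FeatureExtractionMixin):
#       ...
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#   # AutoFeatureExtractor.from_pretrained(...) can now resolve MyFeatureExtractor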
| 18 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase__: str = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
UpperCamelCase__: Union[str, Any] = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
UpperCamelCase__: Any = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
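# Reading of precision_at_10 above (a sketch): row i of `sim` holds cosine
# distances from English sentence vector i to every Indic sentence vector;
# argsort keeps the 10 nearest candidates per row, and the score is the
# fraction of rows whose true index i appears among them, i.e. precision@10
# for cross-lingual sentence retrieval.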
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    """IndicGLUE benchmark metric."""

    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
| 127 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')

        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
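# Why a 3x3 slice: comparing the full (1, 6, 768) tensor against a stored
# reference would be large and brittle; pinning a small corner of the hidden
# states with a loose tolerance (atol=1e-4) is enough to catch numerical or
# weight-loading regressions.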
| 707 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions'''):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
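# What the JIT test above establishes, in sketch form: tracing a function with
# jax.jit must not change results or output shapes, only compile them, e.g.:
#
#   import jax
#   import jax.numpy as jnp
#   f = lambda x: jnp.tanh(x) * 2.0
#   x = jnp.ones((2, 3))
#   assert jax.jit(f)(x).shape == f(x).shape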
| 369 | 0 |
"""simple docstring"""
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("-----------------------------------------------------")
print(f'The list with nth numbers is: {hamming_numbers}')
print("-----------------------------------------------------")
| 680 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
logger.info("""Finished binarization""" )
logger.info(F'''{len(__lowerCAmelCase )} examples processed.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.vocab_size
if vocab_size < (1 << 16):
SCREAMING_SNAKE_CASE__ : Tuple = [np.uintaa(__lowerCAmelCase ) for d in rslt]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [np.intaa(__lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(__lowerCAmelCase , """wb""" ) as handle:
pickle.dump(rslt_ , __lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
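# Why the uint16 branch above: a token id fits in an unsigned 16-bit integer
# iff the vocabulary has fewer than 2**16 = 65536 entries, halving the pickle
# size relative to int32. BERT-base (~30k tokens) and GPT-2 (~50k) both qualify.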
| 680 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
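# The quantity computed above is the Shannon entropy H(p) = -sum_x p(x) ln p(x),
# with the 0 * ln(0) terms defined as 0 via the masked assignment; passing
# unlogit=True first squares the inputs, which the calling code applies to
# attention maps before aggregating per head.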
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropies and head importance scores over the evaluation data."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If we actually pruned the attention heads, set the head mask to None to avoid a shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for the importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first outputs, the attentions are the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss

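# Background note (added for clarity): the importance score follows Michel et al.,
# "Are Sixteen Heads Really Better than One?" (2019). With a multiplicative mask m_h
# applied to every head, the importance of head h is the accumulated |dL/dm_h| over
# the evaluation tokens — a first-order estimate of how much the loss would change
# if that head were switched off.
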
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask heads (set them to zero) based on importance scores, as described in
    Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of a downstream score, use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save the current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percent)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask

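# Illustrative walk-through (added): for a model with 12 layers x 12 heads and
# masking_amount=0.1, each loop iteration zeroes the ~14 least-important remaining
# heads, re-estimates importance, and stops once 1/loss falls below
# masking_threshold * original_score; the last mask that still met the threshold
# is what gets returned and saved to head_mask.npy.
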
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the head weights entirely) based on the head mask found by masking,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Try pruning and test the time speedup; pruning is like masking, but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percent", original_time / new_time * 100)
    save_model(model, args.output_dir)

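# Note (added): masking only zeroes head outputs while keeping tensor shapes, so it
# saves no compute; prune_heads() physically removes the corresponding rows/columns
# from the attention projections, which is why both the parameter count and the
# wall-clock time are re-measured here.
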
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="Masking threshold in terms of the metric (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Fraction of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 652 |
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match a consecutive window of the strings in ks."""
    qts = tuple(re.compile(x + "$") for x in qs)  # the "$" forces complete matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False

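# Example (added for clarity): a flattened GPT-2 parameter key is a tuple such as
# ("transformer", "h", "0", "mlp", "c_fc", "kernel"); the rule ("mlp", "c_fc", "kernel")
# matches because its three regexes match a consecutive window of that key.
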
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace

def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
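
# Reading the specs (added): P("mp", None) shards a weight's first axis across the
# "mp" (model-parallel) mesh axis and replicates the second; P(None, "mp") shards the
# second axis instead; a plain None replicates the whole tensor on every device.
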
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
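
# Usage sketch (added; model_params stands for a flax GPT-2 parameter pytree and is a
# hypothetical name):
#   param_spec = set_partitions(model_params)
# The assert guarantees every parameter was matched by some rule, so the returned
# pytree of PartitionSpecs mirrors the parameter tree exactly.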
| 652 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]

def custom_name_func(func, param_num, param):
    # customize the test name generator function: we want both params to appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"

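# Example (added): for param.args == ("zero2", "base") on test_fp32_non_distributed,
# parameterized generates a sub-test named roughly "test_fp32_non_distributed_zero2_base".
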
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))

@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
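# Example (added): with distributed=True on a 2-GPU machine, the assembled command is roughly
#   deepspeed --num_nodes 1 --num_gpus 2 <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json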
| 343 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Dict = KandinskyInpaintPipeline
lowerCAmelCase_ : Dict = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowerCAmelCase_ : Any = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowerCAmelCase_ : List[str] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase_ : Tuple = False
@property
def lowercase__ ( self : List[str] ):
return 32
@property
def lowercase__ ( self : int ):
return 32
@property
def lowercase__ ( self : str ):
return self.time_input_dim
@property
def lowercase__ ( self : str ):
return self.time_input_dim * 4
@property
def lowercase__ ( self : Any ):
return 100
@property
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase__ ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCAmelCase : int = MultilingualCLIP(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase : List[str] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase : str = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowercase__ ( self : Union[str, Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : str ):
lowerCAmelCase : Dict = self.dummy_text_encoder
lowerCAmelCase : List[Any] = self.dummy_tokenizer
lowerCAmelCase : Optional[int] = self.dummy_unet
lowerCAmelCase : Tuple = self.dummy_movq
lowerCAmelCase : str = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
lowerCAmelCase : Any = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase__ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=0 ):
lowerCAmelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ )
# create init_image
lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : Any = Image.fromarray(np.uint8(UpperCAmelCase_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
lowerCAmelCase : Optional[int] = np.ones((64, 64) , dtype=np.float32 )
lowerCAmelCase : List[Any] = 0
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCAmelCase : List[str] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase__ ( self : Dict ):
lowerCAmelCase : Dict = 'cpu'
lowerCAmelCase : Tuple = self.get_dummy_components()
lowerCAmelCase : Tuple = self.pipeline_class(**UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : int = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCAmelCase : int = output.images
lowerCAmelCase : str = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def lowercase__ ( self : Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.float32 )
lowerCAmelCase : int = 0
lowerCAmelCase : Optional[int] = 'a hat'
lowerCAmelCase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
pipe_prior.to(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.float16 )
lowerCAmelCase : Union[str, Any] = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase : Optional[int] = pipeline(
UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 343 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
def __init__( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=224 , UpperCAmelCase_ : Tuple=1000 , UpperCAmelCase_ : str=[3, 3, 6, 4] , UpperCAmelCase_ : str=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = layer_depths
SCREAMING_SNAKE_CASE__ = embed_dims
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def A_ ( self : Any ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A_ ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A_ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : int ):
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A__ : Union[str, Any] =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
A__ : List[Any] =(
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
A__ : Tuple =False
A__ : List[Any] =False
A__ : List[str] =False
A__ : Tuple =False
A__ : int =False
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def A_ ( self : Tuple ):
pass
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def A_ ( self : Dict ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def A_ ( self : Dict ):
pass
def A_ ( self : int ):
def check_hidden_states_output(UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ = outputs.hidden_states
SCREAMING_SNAKE_CASE__ = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( self : Dict ):
def _config_zero_init(UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A_ ( self : Optional[Any] ):
pass
def _lowercase ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def A_ ( self : int ):
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**_lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 709 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
def __init__( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : List[Any]=[32, 64, 128] , UpperCAmelCase_ : Tuple=[1, 2, 1] , UpperCAmelCase_ : int=[2, 2, 4] , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=2.0 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Optional[int]=8 , UpperCAmelCase_ : int=["stage1", "stage2"] , UpperCAmelCase_ : Optional[int]=[1, 2] , ):
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = hidden_sizes
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = window_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ = patch_norm
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = encoder_stride
SCREAMING_SNAKE_CASE__ = out_features
SCREAMING_SNAKE_CASE__ = out_indices
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[int] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A_ ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = FocalNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A_ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = FocalNetBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = FocalNetBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE__ = FocalNetForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FocalNetForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = FocalNetForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FocalNetForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A__ : Any =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] =(
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] =False
A__ : Dict =False
A__ : Optional[Any] =False
A__ : str =False
A__ : Any =False
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = FocalNetModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=37 , has_text_modality=UpperCAmelCase_ )
def A_ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : Union[str, Any] ):
return
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def A_ ( self : Tuple ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def A_ ( self : Dict ):
pass
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def A_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = outputs.hidden_states
SCREAMING_SNAKE_CASE__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# FocalNet has a different seq_length
SCREAMING_SNAKE_CASE__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE__ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE__ = (
reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) )
@slow
def A_ ( self : int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = FocalNetModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(config=UpperCAmelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
@cached_property
def A_ ( self : Any ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE__ = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Union[str, Any] =(FocalNetBackbone,) if is_torch_available() else ()
A__ : Tuple =FocalNetConfig
A__ : Optional[int] =False
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = FocalNetModelTester(self )
| 400 | 0 |
"""simple docstring"""
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2

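# Example (added): get_mid((0, 0), (2, 4)) == (1.0, 2.0).
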
def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw a Sierpinski triangle: outline the current triangle, then
    recurse into the three corner sub-triangles until depth reaches 0."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)

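# Note (added): a call with depth d ultimately outlines 3**d smallest triangles;
# since each call draws one outline and makes three recursive calls, the total
# number of outlines drawn is (3**(d + 1) - 1) / 2.
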
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 616 |
"""simple docstring"""
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications by halving the exponent."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod

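# Worked example (added): binary_exponentiation(3, 5, 7)
#   5 is odd  -> f(3, 4) * 3
#   4 is even -> b = f(3, 2), answer b * b
#   2 is even -> b = f(3, 1), answer b * b
#   1 is odd  -> f(3, 0) * 3 = 3
# unwinding: f(3, 2) = 9 % 7 = 2, f(3, 4) = 4 % 7 = 4, f(3, 5) = 12 % 7 = 5 = 243 % 7.
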
# a prime number
p = 701

a = 1000000000
b = 10

# fast modular division via binary exponentiation, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# naive exponentiation, O(p):
print((a / b) % p == (a * b ** (p - 2)) % p)
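
# Why this works (added): for prime p and b not divisible by p, Fermat's little
# theorem gives b**(p - 1) ≡ 1 (mod p), hence b**(p - 2) ≡ b**(-1) (mod p), so
# (a / b) % p can be computed as (a * b**(p - 2)) % p using only integer arithmetic.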
| 616 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])

def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent

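# Example (added): handle_test_results("2 failed, 31 passed in 121.15s")
# returns (2, 31, "121.15s").
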
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures

class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
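    # Illustrative conversion: a "_time_spent" of "01:02:03" totals 3723 seconds and renders as "1h2m3s".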
@property
    def header( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures( self ):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ""
for category, failures in category_failures.items():
            if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(snake_case__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload( self ):
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out( ):
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(snake_case__ )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=snake_case__ , )
    def post( self ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value ) > 250 else value
            failures_text += F"""*{key}*\n_{value}_\n\n"""
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
lowerCamelCase_ : List[Any] =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
lowerCamelCase_ : str =F"""*Num failures* :{len(job_result['failed'] )} \n"""
lowerCamelCase_ : Optional[int] =job_result["failures"]
lowerCamelCase_ : List[str] =self.get_reply_blocks(snake_case__ , snake_case__ , snake_case__ , text=snake_case__ )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"""Results for {job}""" , blocks=snake_case__ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links( ) -> Dict:
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowerCamelCase__ )
return {}
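# On success the mapping looks like {"<job name>": "<job html_url>", ...}; on any error an
# empty dict is returned so the downstream `.get(...)` lookups stay safe (illustrative shapes).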
def retrieve_artifact( name : str ) -> Dict:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F"""Could not open {os.path.join(name , file )}.""" ) from e
return _artifact
def retrieve_available_artifacts( ):
    class Artifact:
        def __init__( self , name : str ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self , path : str ):
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
if "::" in line:
                    file_path, test = line.split('::')
else:
                    file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 244 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels : int = 3 , out_channels : int = 3 , down_block_types : Tuple[str] = ("DownEncoderBlock2D",) , up_block_types : Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels : Tuple[int] = (64,) , layers_per_block : int = 1 , act_fn : str = "silu" , latent_channels : int = 3 , sample_size : int = 32 , num_vq_embeddings : int = 256 , norm_num_groups : int = 32 , vq_embed_dim : Optional[int] = None , scaling_factor : float = 0.18_215 , norm_type : str = "group" , ):
super().__init__()
# pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x : torch.FloatTensor , return_dict : bool = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h : torch.FloatTensor , force_not_quantize : bool = False , return_dict : bool = True ):
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample : torch.FloatTensor , return_dict : bool = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
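# Minimal usage sketch (shapes are illustrative, not prescriptive):
#   model = VQModel(latent_channels=3, num_vq_embeddings=256)
#   out = model(torch.randn(1, 3, 32, 32))   # DecoderOutput whose .sample matches the input shape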
| 244 | 1 |
from __future__ import annotations
def check_polygon( nums : list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
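# Illustrative checks (a polygon exists iff the longest side is shorter than the sum of the rest):
#   check_polygon([6, 10, 5])     -> True   (10 < 6 + 5)
#   check_polygon([3, 7, 13, 2])  -> False  (13 >= 3 + 7 + 2)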
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
"""simple docstring"""
a : int
a : Node | None = None
a : Node | None = None
def make_tree() -> Node:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order( root : Node | None ):
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right( root : Node | None , level : int ):
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
    populate_output(root , level )
return output
def get_nodes_from_right_to_left( root : Node | None , level : int ):
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
    populate_output(root , level )
return output
def zigzag( root : Node | None ):
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
return output
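# Expected outputs for make_tree() above (1 at the root, 2/3 below, 4/5 under 2):
#   level_order(make_tree())  -> [1, 2, 3, 4, 5]
#   zigzag(make_tree())       -> [[1], [3, 2], [4, 5]]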
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , """\n""" )
    print(f"""Height of Tree: {height(tree )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mask2former'''] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 667 |
'''simple docstring'''
def climb_stairs( number_of_steps : int ) -> int:
    assert (
        isinstance(number_of_steps, int ) and number_of_steps > 0
    ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
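# Illustrative values (distinct ways to climb taking 1 or 2 steps at a time):
#   climb_stairs(3) == 3   # 1+1+1, 1+2, 2+1
#   climb_stairs(4) == 5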
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model :BertModel , ckpt_dir :str , model_name :str):
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name :str):
        for patt, repl in iter(var_map):
            name = name.replace(patt , repl)
        return F"""bert/{name}"""
    def create_tf_var(tensor :np.ndarray , name :str , session :tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session)
            tf.keras.backend.set_value(tf_var , torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''') + '''.ckpt'''))
def main( raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
    main()
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig( PretrainedConfig ):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.0_2 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 213 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):  # class name restored from diffusers' torch+torchsde dummy objects
    """simple docstring"""
    _backends = ["torch", "torchsde"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'torchsde'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'torchsde'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'torchsde'] )
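# These dummies let `import diffusers` succeed without torchsde installed; any attempt to
# instantiate the class (or call from_config / from_pretrained) raises a clear backend error.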
| 682 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
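# e.g. floats_list((2, 3)) -> two lists of three random floats drawn from [0, scale).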
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Any:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
            speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Optional[int]:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ) -> List[Any]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ) -> List[str]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ) -> Tuple:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
# Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ) -> Dict:
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ) -> Optional[int]:
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> Optional[int]:
# fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_var_normalization( self ) -> Tuple:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
| 682 | 1 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
"""simple docstring"""
    def __init__( self : Optional[int] ,list_of_points : list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self : str ,t : float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
            output_values.append(
                comb(self.degree ,i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) ,5 ) == 1
return output_values
    def bezier_curve_function( self : List[str] ,t : float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
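    # Illustrative endpoints for the degree-1 curve through [(1, 2), (3, 5)]:
    #   bezier_curve_function(0.0) -> (1.0, 2.0) and bezier_curve_function(1.0) -> (3.0, 5.0)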
    def plot_curve( self : Union[str, Any] ,step_size : float = 0.0_1 ):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x ,to_plot_y ,color='''blue''' ,label='''Curve of Degree ''' + str(self.degree ) ,)
        plt.scatter(x ,y ,color='''red''' ,label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = '''ybelkada/fonts'''
def _check_torch_version():
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
    """simple docstring"""
    requires_backends(torch_extract_patches , ['''torch'''] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
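# Illustrative shape walk-through: a (3, 32, 32) image with 16x16 patches unfolds to
# (1, 768, 4), is reshaped/permuted into the patch grid, and returns shape (1, 2, 2, 768).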
def render_text( text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
"""simple docstring"""
    requires_backends(render_text , '''vision''' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '''\n'''.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , '''Arial.TTF''' )
    font = ImageFont.truetype(font , encoding='''UTF-8''' , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''' , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header( image , header , **kwargs ):
    """simple docstring"""
    requires_backends(render_header , '''vision''' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['flattened_patches']
    def __init__( self : Any ,do_convert_rgb : bool = True ,do_normalize : bool = True ,patch_size : Dict[str, int] = None ,max_patches : int = 2_0_4_8 ,is_vqa : bool = False ,**kwargs : List[str] ,):
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self : int ,image : np.ndarray ,max_patches : int ,patch_size : dict ,**kwargs : Tuple ):
        requires_backends(self.extract_flattened_patches ,'''torch''' )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image ,ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height , patch_width = patch_size['''height'''], patch_size['''width''']
        image_height , image_width = get_image_size(image )
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) ,max_patches ) ,1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) ,max_patches ) ,1 )
        resized_height = max(num_feasible_rows * patch_height ,1 )
        resized_width = max(num_feasible_cols * patch_width ,1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=False ,antialias=True ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image ,patch_height ,patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 ,columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] ,-1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result ,[0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result
    def normalize( self : str ,image : np.ndarray ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs : List[Any] ):
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std ,1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image ,mean=mean ,std=adjusted_stddev ,**kwargs )
    def preprocess( self : List[Any] ,images : ImageInput ,header_text : Optional[str] = None ,do_convert_rgb : bool = None ,do_normalize : Optional[bool] = None ,max_patches : Optional[int] = None ,patch_size : Optional[Dict[str, int]] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : ChannelDimension = ChannelDimension.FIRST ,**kwargs : List[Any] ,):
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('''data_format''' ,None ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
            font_bytes = kwargs.pop('''font_bytes''' ,None )
            font_path = kwargs.pop('''font_path''' ,None )
            if isinstance(header_text ,str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image ,header_text[i] ,font_bytes=font_bytes ,font_path=font_path )
                for i, image in enumerate(images )
            ]
if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image ,max_patches=max_patches ,patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=return_tensors )
        return encoded_outputs
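# Minimal usage sketch (assumed call pattern; header_text is only required when is_vqa=True):
#   processor = Pix2StructImageProcessor(is_vqa=False)
#   batch = processor(images=[np.zeros((64, 64, 3), dtype=np.uint8)], return_tensors="np")
#   batch["flattened_patches"].shape  -> (1, max_patches, 2 + 16 * 16 * 3)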
| 41 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
UpperCamelCase = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , )->int:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
A_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A_ : List[str] = 1
A_ : Union[str, Any] = len(self.sp_model )
A_ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE )
}
A_ : Any = {v: k for k, v in self.lang_code_to_id.items()}
A_ : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A_ : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A_ : List[Any] = src_lang if src_lang is not None else '''eng_Latn'''
A_ : int = self.lang_code_to_id[self._src_lang]
A_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self )->Tuple:
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
    def __setstate__( self , d )->List[Any]:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
    def vocab_size( self )->int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
    def src_lang( self )->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang )->None:
        '''simple docstring'''
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False )->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None )->List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs )->str:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def get_vocab( self )->Optional[Any]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text )->List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token )->Union[str, Any]:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index )->List[Any]:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens )->List[str]:
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None )->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "eng_Latn" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fra_Latn" , **_SCREAMING_SNAKE_CASE , )->BatchEncoding:
'''simple docstring'''
A_ : Optional[Any] = src_lang
A_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[str]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Any = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
A_ : int = []
A_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Union[str, Any] = [self.cur_lang_code]
A_ : Tuple = [self.eos_token_id]
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : List[Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
A_ : Optional[int] = []
A_ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Optional[Any] = [self.cur_lang_code]
A_ : Tuple = [self.eos_token_id]
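# Hedged illustration of the language-code handling above, assuming the
# non-legacy layout (legacy_behaviour=False): the source language code leads
# the sequence and </s> closes it. After set_src_lang_special_tokens("eng_Latn"),
# prefix_tokens holds the id of "eng_Latn" and suffix_tokens holds eos_token_id,
# so a sentence is encoded as:  eng_Latn ▁Hello ▁world </s>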
| 720 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
UpperCamelCase = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
UpperCamelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = MBartTokenizer
snake_case = []
snake_case = []
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , )->str:
'''simple docstring'''
A_ : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ : Union[str, Any] = vocab_file
A_ : List[Any] = False if not self.vocab_file else True
A_ : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
A_ : List[Any] = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ : int = src_lang if src_lang is not None else '''en_XX'''
A_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
A_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self )->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->List[int]:
'''simple docstring'''
A_ : List[str] = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A_ : str = src_lang
A_ : Optional[int] = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
A_ : Any = tgt_lang_id
return inputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en_XX" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro_RO" , **_SCREAMING_SNAKE_CASE , )->BatchEncoding:
'''simple docstring'''
A_ : Optional[int] = src_lang
A_ : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : str = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
A_ : int = []
A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
A_ : int = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Union[str, Any] = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
A_ : str = []
A_ : Tuple = [self.eos_token_id, self.cur_lang_code]
A_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Any = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
A_ : Any = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
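# Hedged illustration of the MBart layout built above: the prefix is empty and
# the suffix is [</s>, lang_code], so with src_lang="en_XX" a sentence encodes
# as:  ▁UN ▁Chief ▁says ... </s> en_XX
# The TemplateProcessing templates in set_src/tgt_lang_special_tokens mirror
# exactly this layout for the backend tokenizer.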
| 152 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_UpperCAmelCase = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :torch.nn.Module , SCREAMING_SNAKE_CASE :BnbQuantizationConfig , SCREAMING_SNAKE_CASE :Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE :Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE :Optional[List[str]] = None , SCREAMING_SNAKE_CASE :Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE :bool = False , ) -> Optional[Any]:
__lowerCAmelCase : Optional[Any] = bnb_quantization_config.load_in_abit
__lowerCAmelCase : List[str] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
__lowerCAmelCase : Optional[Any] = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
__lowerCAmelCase : str = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__lowerCAmelCase : Any = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Union[str, Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
__lowerCAmelCase : Tuple = load_in_abit
__lowerCAmelCase : Dict = load_in_abit
__lowerCAmelCase : Tuple = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
__lowerCAmelCase : Dict = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
__lowerCAmelCase : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
            param.to(torch.float32 )
            if param.dtype != torch.float32:
                __lowerCAmelCase : List[str] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                __lowerCAmelCase : Tuple = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                if param is not None:
                    param.to(torch.float32 )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
__lowerCAmelCase : List[Any] = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Tuple = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
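# Hedged usage sketch for the entry point above: it assumes this function
# corresponds to accelerate's `load_and_quantize_model` and that the config
# dataclass is `BnbQuantizationConfig`; the model id and checkpoint folder are
# illustrative only.
def _load_and_quantize_usage_sketch():
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("facebook/opt-350m")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/checkpoint",  # folder containing the weight files
        device_map="auto",
    )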
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :Dict=None ) -> Optional[Any]:
if device_map is None:
if torch.cuda.is_available():
__lowerCAmelCase : Dict = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
__lowerCAmelCase : int = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
            name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__lowerCAmelCase : Union[str, Any] = {}
__lowerCAmelCase : List[str] = special_dtypes
__lowerCAmelCase : List[str] = no_split_module_classes
__lowerCAmelCase : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__lowerCAmelCase : Tuple = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = max_memory
__lowerCAmelCase : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
__lowerCAmelCase : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__lowerCAmelCase : str = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
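# Hedged illustration of a custom `device_map` accepted above (the module
# names are hypothetical): quantized blocks stay on GPU 0 while the head is
# kept on the CPU.
_example_device_map = {
    "transformer.word_embeddings": 0,
    "transformer.h": 0,
    "lm_head": "cpu",
}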
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :Any=None ) -> Tuple:
if modules_to_not_convert is None:
__lowerCAmelCase : Any = []
__lowerCAmelCase , __lowerCAmelCase : str = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , ) -> Any:
__lowerCAmelCase : Dict = False
for name, module in model.named_children():
if current_key_name is None:
__lowerCAmelCase : Optional[int] = []
current_key_name.append(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__lowerCAmelCase : Optional[int] = """.""".join(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__lowerCAmelCase : Any = False
break
if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    __lowerCAmelCase : int = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    __lowerCAmelCase : Union[str, Any] = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
__lowerCAmelCase : Dict = module.weight.data
if module.bias is not None:
__lowerCAmelCase : Tuple = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = True
if len(list(module.children() ) ) > 0:
__lowerCAmelCase , __lowerCAmelCase : Tuple = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
# Create a copy of the model
with init_empty_weights():
        __lowerCAmelCase : List[str] = deepcopy(SCREAMING_SNAKE_CASE )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
__lowerCAmelCase : str = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__lowerCAmelCase : List[str] = sum(SCREAMING_SNAKE_CASE , [] )
__lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
__lowerCAmelCase : Dict = False
if hasattr(SCREAMING_SNAKE_CASE , """base_model_prefix""" ):
__lowerCAmelCase : Tuple = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowerCAmelCase : List[Any] = list(model.named_children() )
__lowerCAmelCase : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
__lowerCAmelCase : Union[str, Any] = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
__lowerCAmelCase : Optional[Any] = [""".weight""", """.bias"""]
__lowerCAmelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowerCAmelCase : str = name.replace(SCREAMING_SNAKE_CASE , """""" )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Dict ) -> Tuple:
for m in model.modules():
        if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linear4bit ):
return True
return False
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :nn.Module ) -> Optional[int]:
return next(parameter.parameters() ).device
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = param_name
__lowerCAmelCase : Tuple = model
if "." in tensor_name:
__lowerCAmelCase : str = tensor_name.split(""".""" )
for split in splits[:-1]:
__lowerCAmelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
__lowerCAmelCase : Any = new_module
__lowerCAmelCase : Union[str, Any] = splits[-1]
# offload weights
__lowerCAmelCase : Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
        set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """meta""" , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
| 504 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[float]] ) -> list[list[float]]:
__lowerCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(SCREAMING_SNAKE_CASE ):
if len(SCREAMING_SNAKE_CASE ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(SCREAMING_SNAKE_CASE ) )
return data_lists
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[float]] , SCREAMING_SNAKE_CASE :list[int] ) -> list[list[float]]:
__lowerCAmelCase : list[list[float]] = []
for dlist, weight in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = min(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : list[float] = []
        # for weight 0 the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__lowerCAmelCase : int = F'''Invalid weight of {weight:f} provided'''
raise ValueError(SCREAMING_SNAKE_CASE )
score_lists.append(SCREAMING_SNAKE_CASE )
return score_lists
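# Hedged illustration of the weight handling above: weight 0 means "lower is
# better" (score = 1 - normalized value) and weight 1 means "higher is better"
# (score = the normalized value itself).
def _normalize_column_sketch(values: list[float], weight: int) -> list[float]:
    lo, hi = min(values), max(values)
    normalized = [(v - lo) / (hi - lo) for v in values]
    return [1 - n for n in normalized] if weight == 0 else normalized
# _normalize_column_sketch([20.0, 23.0, 22.0], weight=0) -> [1.0, 0.0, 0.333...]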
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[float]] ) -> list[float]:
__lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = final_scores[j] + ele
return final_scores
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[float]] , SCREAMING_SNAKE_CASE :list[int] ) -> list[list[float]]:
__lowerCAmelCase : str = get_data(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = calculate_each_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = generate_final_scores(SCREAMING_SNAKE_CASE )
# append scores to source data
for i, ele in enumerate(SCREAMING_SNAKE_CASE ):
source_data[i].append(SCREAMING_SNAKE_CASE )
    return source_data
| 504 | 1 |
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowercase = len(set_a.intersection(_UpperCamelCase ) )
if alternative_union:
__lowercase = len(_UpperCamelCase ) + len(_UpperCamelCase )
else:
__lowercase = len(set_a.union(_UpperCamelCase ) )
return intersection / union
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(_UpperCamelCase , (list, tuple) ):
__lowercase = [element for element in set_a if element in set_b]
if alternative_union:
__lowercase = len(_UpperCamelCase ) + len(_UpperCamelCase )
return len(_UpperCamelCase ) / union
else:
__lowercase = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCamelCase ) / len(_UpperCamelCase )
return None
if __name__ == "__main__":
a : List[Any] = {'''a''', '''b''', '''c''', '''d''', '''e'''}
a : List[str] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 527 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Any = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit_text_model"
def __init__( self , snake_case_=4_9_4_0_8 , snake_case_=5_1_2 , snake_case_=2_0_4_8 , snake_case_=1_2 , snake_case_=8 , snake_case_=1_6 , snake_case_="quick_gelu" , snake_case_=1e-5 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1.0 , snake_case_=0 , snake_case_=4_9_4_0_6 , snake_case_=4_9_4_0_7 , **snake_case_ , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit_vision_model"
def __init__( self , snake_case_=7_6_8 , snake_case_=3_0_7_2 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3 , snake_case_=7_6_8 , snake_case_=3_2 , snake_case_="quick_gelu" , snake_case_=1e-5 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1.0 , **snake_case_ , ) -> Any:
'''simple docstring'''
super().__init__(**snake_case_ )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit"
__UpperCAmelCase = True
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=5_1_2 , snake_case_=2.6_5_9_2 , snake_case_=True , **snake_case_ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case_ )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowercase = {}
            logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
__lowercase = OwlViTTextConfig(**snake_case_ )
__lowercase = OwlViTVisionConfig(**snake_case_ )
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = return_dict
__lowercase = 1.0
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
@classmethod
def A ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> int:
'''simple docstring'''
__lowercase = {}
__lowercase = text_config
__lowercase = vision_config
return cls.from_dict(snake_case_ , **snake_case_ )
def A ( self ) -> Any:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def A ( self ) -> float:
'''simple docstring'''
return 1e-4
def A ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=snake_case_ , seq_length=snake_case_ , framework=snake_case_ )
__lowercase = super().generate_dummy_inputs(
processor.image_processor , batch_size=snake_case_ , framework=snake_case_ )
return {**text_input_dict, **image_input_dict}
@property
def A ( self ) -> int:
'''simple docstring'''
return 1_4
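# Hedged usage sketch: the error messages above suggest these classes map to
# transformers' OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig; the
# combined config can then be built from plain dicts:
def _owlvit_config_usage_sketch():
    from transformers import OwlViTConfig

    return OwlViTConfig.from_text_vision_configs(
        text_config={"hidden_size": 512}, vision_config={"patch_size": 32}
    )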
| 527 | 1 |
from ....utils import logging
a_ : str = logging.get_logger(__name__)
class lowerCamelCase__ ( lowercase_):
"""simple docstring"""
def __init__(self , __a , __a=None , __a=20_48 ):
'''simple docstring'''
lowerCamelCase = config.__dict__
lowerCamelCase = modal_hidden_size
if num_labels:
            lowerCamelCase = num_labels
| 623 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ''''''
_lowercase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowercase : str = None # compression type in fsspec. ex: "gzip"
_lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
super().__init__(self , **UpperCamelCase__)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case__ = fsspec.open(
UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
snake_case__ = (
self.compressed_name[: self.compressed_name.rindex(""".""")]
if """.""" in self.compressed_name
else self.compressed_name
)
snake_case__ = None
@classmethod
def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
'''simple docstring'''
return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")
def __magic_name__ ( self : Dict):
'''simple docstring'''
if self.dir_cache is None:
snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
snake_case__ = {f["""name"""]: f}
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
'''simple docstring'''
return self.file.open().read()
def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
snake_case__ = self._strip_protocol(UpperCamelCase__)
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''bz2'''
_lowercase : Dict = '''bz2'''
_lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''gzip'''
_lowercase : List[str] = '''gzip'''
_lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : str = '''lz4'''
_lowercase : List[Any] = '''lz4'''
_lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = '''xz'''
_lowercase : Union[str, Any] = '''xz'''
_lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = '''zstd'''
_lowercase : Tuple = '''zstd'''
_lowercase : Union[str, Any] = '''.zst'''
def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
'''simple docstring'''
super().__init__(
fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case__ = self.file.__enter__
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : str):
'''simple docstring'''
snake_case__ = file_
def __enter__( self : List[str]):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)
def __iter__( self : Any):
'''simple docstring'''
return iter(self._file)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
return next(self._file)
def __getattr__( self : Any , UpperCamelCase__ : int):
'''simple docstring'''
return getattr(self._file , UpperCamelCase__)
def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))
snake_case__ = fixed_enter
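# Hedged usage sketch: `datasets` registers these filesystems under their
# `protocol` attributes, so (assuming the gzip subclass above is registered) a
# compressed file behaves like a filesystem containing a single file:
def _gzip_fs_usage_sketch():
    import fsspec

    fs = fsspec.filesystem("gzip", fo="data.txt.gz")  # hypothetical local file
    with fs.open("data.txt", "rb") as f:  # path is the uncompressed file name
        return f.read()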
| 654 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def A (__A : Union[str, Any] = "AAPL" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCAmelCase_ = BeautifulSoup(requests.get(_lowerCAmelCase ).text , '''html.parser''' )
UpperCAmelCase_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 701 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
snake_case_ : Tuple = False
class __snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''')
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = pipe(
image=_snake_case , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 169 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase__ : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> int:
for pegasus_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
return k
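# Hedged worked example of the pattern rewriting above (patterns apply in list
# order, so "/" is replaced by "." before the dotted patterns match):
#   "encoder/layer_0/ffn/dense_1/kernel"
#   -> "encoder.layer_0.ffn.dense_1.kernel"   ("/" -> ".")
#   -> "encoder.layers.0.ffn.dense_1.kernel"  ("r.layer_" -> "r.layers.")
#   -> "encoder.layers.0.fc2.kernel"          ("ffn.dense_1." -> "fc2.")
#   -> "encoder.layers.0.fc2.weight"          ("kernel" -> "weight")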
def _lowerCAmelCase ( __snake_case : dict , __snake_case : dict ) -> PegasusForConditionalGeneration:
__A : Dict = DEFAULTS.copy()
cfg_kwargs.update(__snake_case )
__A : List[Any] = PegasusConfig(**__snake_case )
__A : Dict = PegasusForConditionalGeneration(__snake_case )
__A : Tuple = torch_model.model.state_dict()
__A : int = {}
for k, v in tf_weights.items():
__A : int = rename_state_dict_key(__snake_case )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
__A : int = v.T
__A : Optional[int] = torch.tensor(__snake_case , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
__A : Optional[Any] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__A : Tuple = mapping['shared.weight']
__A : Dict = mapping['shared.weight']
__A : int = {k: torch.zeros_like(__snake_case ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**__snake_case )
__A ,__A : List[Any] = torch_model.model.load_state_dict(__snake_case , strict=__snake_case )
__A : int = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def _lowerCAmelCase ( __snake_case : List[Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
__A : str = tf.train.list_variables(__snake_case )
__A : List[Any] = {}
__A : str = ['Adafactor', 'global_step']
for name, shape in tqdm(__snake_case , desc='converting tf checkpoint to dict' ):
__A : Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
__A : Tuple = tf.train.load_variable(__snake_case , __snake_case )
__A : Dict = array
return tf_weights
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Tuple:
# save tokenizer first
__A : int = Path(__snake_case ).parent.name
__A : int = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
__A : str = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=__snake_case )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__snake_case )
# convert model
__A : Dict = get_tf_weights_as_numpy(__snake_case )
__A : List[str] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
__A : str = task_specific_params
__A : Any = convert_pegasus(__snake_case , __snake_case )
torch_model.save_pretrained(__snake_case )
__A : Optional[int] = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(__snake_case , Path(__snake_case ) / 'pytorch_model.bin' )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : Any = parser.parse_args()
if args.save_dir is None:
lowercase__ : Tuple = Path(args.tf_ckpt_path).parent.name
lowercase__ : Any = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
            __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
| 8 | 1 |
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = len(lowerCamelCase )
__lowercase = sum(lowerCamelCase )
__lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowercase = True
for i in range(1 , s + 1 ):
__lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
__lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowercase = s - 2 * j
break
return diff
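# A minimal runnable sketch of the same dynamic programme (set-based rather
# than the boolean table above), kept separate for reference:
def _min_partition_diff_reference(arr: list[int]) -> int:
    total = sum(arr)
    reachable = {0}  # subset sums reachable so far
    for x in arr:
        reachable |= {s + x for s in reachable}
    # best achievable subset sum not exceeding half of the total
    best = max(s for s in reachable if s <= total // 2)
    return total - 2 * best
# _min_partition_diff_reference([1, 6, 11, 5]) -> 1  ({1, 5, 6} vs {11})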
| 711 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
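# Hedged usage sketch (the class names above are obfuscated; in transformers
# these utilities are GlueDataset and GlueDataTrainingArguments):
def _glue_dataset_usage_sketch():
    from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    args = GlueDataTrainingArguments(
        task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128
    )
    train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
    return len(train_dataset), train_dataset.get_labels()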
| 53 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
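# How these placeholders behave (illustrative): the DummyObject metaclass routes
# attribute access and instantiation through requires_backends, which raises an
# ImportError telling the user to install `flax` and `transformers` before the
# real class can be imported and used.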
| 539 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 680 | 0 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether a system of forces is in static (rotational) equilibrium."""
    # The moment of each force about the origin is the 2D cross product r x F;
    # the system is in equilibrium when the net moment is (approximately) zero.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
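# Worked example (illustrative): a single 10 N force along +y applied at (2, 0)
# produces a moment r x F = 2 * 10 = 20 N*m about the origin, so on its own it
# is not in equilibrium:
#     in_static_equilibrium(array([[0, 10]]), array([[2, 0]]))  # -> False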
| 704 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Swap reds into the growing low region.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Swap blues into the shrinking high region.
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
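# Invariant check (illustrative):
#     dutch_national_flag_sort([2, 0, 1, 2, 0, 1])  # -> [0, 0, 1, 1, 2, 2]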
| 15 | 0 |
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes the wrapped function return its own running time."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
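# Example usage (illustrative; the feature spec and output path are assumptions):
#     features = datasets.Features(
#         {"text": datasets.Value("string"), "label": datasets.Value("int32")}
#     )
#     dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#     print(len(dataset))  # -> 100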
| 13 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            bos_token=bos_token, eos_token=eos_token, clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
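# Example usage (illustrative):
#     tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#     tokenizer("Hello world")["input_ids"]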
| 321 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
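# Example usage (illustrative; the community pipeline name is an assumption):
#     pipeline_cls = get_class_from_dynamic_module(
#         "clip_guided_stable_diffusion", "pipeline.py"
#     )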
| 183 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
@abstractmethod
def a_ ( lowercase_ ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def a_ ( self ) -> Any:
raise NotImplementedError()
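# Illustrative subclass (hypothetical command) showing the intended contract:
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello = parser.add_parser("hello")
#             hello.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")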
| 183 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` in multi-process setups."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` and remove them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
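# Example (illustrative): merge_dicts performs a recursive in-place merge.
#     destination = {"a": {"x": 1}}
#     merge_dicts({"a": {"y": 2}}, destination)  # -> {"a": {"x": 1, "y": 2}}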
| 612 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
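# Example usage (illustrative):
#     config = Pix2StructConfig()          # default text + vision sub-configurations
#     config.text_config.num_layers        # -> 12
#     config.vision_config.hidden_size     # -> 768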
| 612 | 1 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 713 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
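# Example usage (illustrative):
#     config = VisualBertConfig(visual_embedding_dim=1024)
#     config.hidden_size  # -> 768 (default)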
| 414 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (iterative Euclid)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (recursive Euclid)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
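# Worked trace (illustrative): euclidean_gcd(48, 18)
#     (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the gcd is 6.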
| 11 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform Luhn validation on the given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether a given credit card number is valid and return the result."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 11 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
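# Run (illustrative; the test file path is an assumption based on the usual
# transformers repository layout):
#     python -m pytest tests/models/albert/test_modeling_albert.py -k "test_model"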
| 709 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
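# --- illustrative sketch (ours, not part of the original test file): the
# --- zero_mean_unit_var_norm helper exercised above boils down to classic
# --- standardization; the eps guard is our assumption, not necessarily the
# --- library's exact value.
import numpy as _np_sketch

def _zero_mean_unit_var_sketch(x, eps=1e-7):
    # shift to mean 0 and scale to variance 1, guarding against a flat signal
    return (x - x.mean()) / _np_sketch.sqrt(x.var() + eps)

_demo = _zero_mean_unit_var_sketch(_np_sketch.random.rand(1_0_0_0) * 6_5_5_3_5)
assert abs(_demo.mean()) < 1e-3 and abs(_demo.var() - 1) < 1e-3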
| 623 | 0 |
from __future__ import annotations
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
'''simple docstring'''
if len(_lowerCAmelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
__snake_case = __snake_case = sum(array[:k] )
for i in range(len(_lowerCAmelCase ) - k ):
__snake_case = current_sum - array[i] + array[i + k]
__snake_case = max(_lowerCAmelCase , _lowerCAmelCase )
return max_sum
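# --- readable sketch (ours; variable names below are illustrative, the obfuscated
# --- function above implements the same O(n) sliding window) ---
def _max_sum_sliding_window(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    window = best = sum(array[:k])
    for i in range(len(array) - k):
        window += array[i + k] - array[i]  # slide right: drop array[i], add array[i + k]
        best = max(best, window)
    return best

assert _max_sum_sliding_window([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24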
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
A : str = [randint(-1000, 1000) for i in range(100)]
A : Union[str, Any] = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 371 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
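# --- minimal usage sketch (ours, not part of the test suite): patch_submodule
# --- temporarily rebinds a dotted attribute as seen from inside a target module,
# --- then restores it on exit, as the assertions above verify.
import os as _os_for_sketch

_sentinel = "__patch_submodule_sketch_mock__"
with patch_submodule(_test_patching, "os.path.join", _sentinel):
    assert _test_patching.os.path.join is _sentinel
assert _test_patching.os.path.join is _os_for_sketch.path.join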
def _lowerCAmelCase ( ) -> int:
'''simple docstring'''
assert _test_patching.open is open
__snake_case = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , _lowerCAmelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
__snake_case = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , _lowerCAmelCase ):
pass
def _lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
__snake_case = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , _lowerCAmelCase ) is None
with patch_submodule(_test_patching , "len" , _lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
__snake_case = "__test_patch_submodule_start_and_stop_mock__"
__snake_case = patch_submodule(_test_patching , "open" , _lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _lowerCAmelCase ( ) -> Any:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case = "__test_patch_submodule_successive_join__"
__snake_case = "__test_patch_submodule_successive_dirname__"
__snake_case = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.rename" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _lowerCAmelCase ( ) -> str:
'''simple docstring'''
__snake_case = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , _lowerCAmelCase ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , _lowerCAmelCase ):
pass
| 371 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : List[str] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] ="""ibert"""
def __init__( self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.0_2 , __a=1e-1_2 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=False , __a="none" , **__a , ):
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = quant_mode
__lowerCAmelCase = force_dequant
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def snake_case ( self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
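# --- note (ours): for the default task the property above resolves to
# --- OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
# ---              ("attention_mask", {0: "batch", 1: "sequence"})]).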
| 282 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__a , )
assert hasattr(self , "env" )
def snake_case ( self , __a ):
# configuration for running training on smdistributed Model Parallel
__lowerCAmelCase = {
"enabled": True,
"processes_per_host": 8,
}
__lowerCAmelCase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowerCAmelCase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowerCAmelCase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version="py36" , )
def snake_case ( self , __a ):
TrainingJobAnalytics(__a ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def snake_case ( self , __a ):
# create estimator
__lowerCAmelCase = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __a )
| 282 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ) -> Optional[Any]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(_UpperCAmelCase ), magnitude * sin(_UpperCAmelCase )]
return [magnitude * cos(radians(_UpperCAmelCase ) ), magnitude * sin(radians(_UpperCAmelCase ) )]
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1 ) -> str:
'''simple docstring'''
__lowercase = cross(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = sum(_UpperCAmelCase )
return abs(_UpperCAmelCase ) < eps
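# --- note (ours): the check above is a 2-D moment balance: it sums the cross
# --- products of the forces with their application points and treats a total
# --- below eps as zero net torque, i.e. static equilibrium.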
if __name__ == "__main__":
# Test to check if it works
lowerCAmelCase__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
lowerCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCAmelCase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCAmelCase__ = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
lowerCAmelCase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 321 | '''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__a: str = 16
__a: Optional[Any] = 32
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 , UpperCAmelCase = "bert-base-cased" ):
lowercase__ : str = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowercase__ : Optional[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Optional[int] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowercase__ : Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# Initialize accelerator
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Dict = config['''lr''']
lowercase__ : Dict = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : List[Any] = int(config['''batch_size'''] )
lowercase__ : Tuple = args.model_name_or_path
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase , return_dict=UpperCAmelCase )
# Instantiate optimizer
lowercase__ : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : int = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowercase__ : int = 1
lowercase__ : int = (len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=0 , num_training_steps=UpperCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(UpperCAmelCase , total_num_steps=UpperCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ : str = 0
# Now we train the model
lowercase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
lowercase__ : Any = 0
lowercase__ : Optional[Any] = {}
for epoch in range(UpperCAmelCase , UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
lowercase__ : int = model(**UpperCAmelCase )
lowercase__ : Any = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ : Dict = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Optional[Any] = model(**UpperCAmelCase )
lowercase__ : Tuple = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ : int = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase ) - 1:
lowercase__ : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
lowercase__ : Any = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
lowercase__ : Tuple = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
def __UpperCamelCase ( ):
lowercase__ : int = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCAmelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCAmelCase , )
parser.add_argument(
'''--output_dir''' , type=UpperCAmelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
        '''--performance_lower_bound''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=UpperCAmelCase , default=3 , help='''Number of train epochs.''' , )
lowercase__ : List[Any] = parser.parse_args()
lowercase__ : List[str] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
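# --- example invocation (ours): the flag names match the argparse definitions
# --- above; the launcher, script name, and values are illustrative.
# accelerate launch this_script.py --model_name_or_path bert-base-cased \
#     --output_dir . --num_epochs 3 --performance_lower_bound 0.80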
| 152 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case_ = logging.get_logger(__name__)
class snake_case_ ( _A):
lowerCamelCase :List[Any] = "AutoTokenizer"
lowerCamelCase :List[str] = ["tokenizer"]
lowerCamelCase :Any = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , __lowercase , __lowercase=None ) -> Dict:
super().__init__(__lowercase )
lowerCamelCase : Optional[Any] =speaker_embeddings
@classmethod
def __lowercase ( cls , __lowercase , __lowercase="speaker_embeddings_path.json" , **__lowercase ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
lowerCamelCase : str =get_file_from_repo(
__lowercase , __lowercase , subfolder=kwargs.pop('''subfolder''' , __lowercase ) , cache_dir=kwargs.pop('''cache_dir''' , __lowercase ) , force_download=kwargs.pop('''force_download''' , __lowercase ) , proxies=kwargs.pop('''proxies''' , __lowercase ) , resume_download=kwargs.pop('''resume_download''' , __lowercase ) , local_files_only=kwargs.pop('''local_files_only''' , __lowercase ) , use_auth_token=kwargs.pop('''use_auth_token''' , __lowercase ) , revision=kwargs.pop('''revision''' , __lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__lowercase , __lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCamelCase : Tuple =None
else:
with open(__lowercase ) as speaker_embeddings_json:
lowerCamelCase : Optional[int] =json.load(__lowercase )
else:
lowerCamelCase : Optional[Any] =None
lowerCamelCase : int =AutoTokenizer.from_pretrained(__lowercase , **__lowercase )
return cls(tokenizer=__lowercase , speaker_embeddings=__lowercase )
def __lowercase ( self , __lowercase , __lowercase="speaker_embeddings_path.json" , __lowercase="speaker_embeddings" , __lowercase = False , **__lowercase , ) -> List[Any]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__lowercase , __lowercase , '''v2''' ) , exist_ok=__lowercase )
lowerCamelCase : List[str] ={}
lowerCamelCase : List[Any] =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCamelCase : str =self._load_voice_preset(__lowercase )
lowerCamelCase : str ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __lowercase , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__lowercase , )
lowerCamelCase : Tuple =os.path.join(__lowercase , F"{prompt_key}_{key}.npy" )
lowerCamelCase : Dict =tmp_dict
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as fp:
json.dump(__lowercase , __lowercase )
super().save_pretrained(__lowercase , __lowercase , **__lowercase )
def __lowercase ( self , __lowercase = None , **__lowercase ) -> Union[str, Any]:
lowerCamelCase : str =self.speaker_embeddings[voice_preset]
lowerCamelCase : Dict ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCamelCase : str =get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __lowercase ) , cache_dir=kwargs.pop('''cache_dir''' , __lowercase ) , force_download=kwargs.pop('''force_download''' , __lowercase ) , proxies=kwargs.pop('''proxies''' , __lowercase ) , resume_download=kwargs.pop('''resume_download''' , __lowercase ) , local_files_only=kwargs.pop('''local_files_only''' , __lowercase ) , use_auth_token=kwargs.pop('''use_auth_token''' , __lowercase ) , revision=kwargs.pop('''revision''' , __lowercase ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCamelCase : Tuple =np.load(__lowercase )
return voice_preset_dict
def __lowercase ( self , __lowercase = None ) -> Optional[int]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __lowercase=None , __lowercase=None , __lowercase="pt" , __lowercase=2_5_6 , __lowercase=False , __lowercase=True , __lowercase=False , **__lowercase , ) -> Any:
if voice_preset is not None and not isinstance(__lowercase , __lowercase ):
if (
isinstance(__lowercase , __lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCamelCase : Union[str, Any] =self._load_voice_preset(__lowercase )
else:
if isinstance(__lowercase , __lowercase ) and not voice_preset.endswith('''.npz''' ):
lowerCamelCase : Union[str, Any] =voice_preset + '''.npz'''
lowerCamelCase : List[str] =np.load(__lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(__lowercase , **__lowercase )
lowerCamelCase : Tuple =BatchFeature(data=__lowercase , tensor_type=__lowercase )
lowerCamelCase : Union[str, Any] =self.tokenizer(
__lowercase , return_tensors=__lowercase , padding='''max_length''' , max_length=__lowercase , return_attention_mask=__lowercase , return_token_type_ids=__lowercase , add_special_tokens=__lowercase , **__lowercase , )
if voice_preset is not None:
lowerCamelCase : str =voice_preset
return encoded_text
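# --- usage sketch (ours): the class above mirrors Bark's processor, but its name
# --- is obfuscated here; "processor_cls", the checkpoint id, and the preset name
# --- below are all illustrative.
# processor = processor_cls.from_pretrained("suno/bark-small")
# inputs = processor("Hello, world!", voice_preset="v2/en_speaker_6")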
| 262 |
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
lowerCamelCase : Optional[Any] =[
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
lowerCamelCase : Dict =[2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(SCREAMING_SNAKE_CASE_ , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase : Any =primes[:idx]
break
lowerCamelCase , lowerCamelCase : Any =n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase : Union[str, Any] =False
for r in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : List[str] =pow(SCREAMING_SNAKE_CASE_ , d * 2**r , SCREAMING_SNAKE_CASE_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase : List[str] =True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
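# --- worked example (ours) of the d * 2**s == n - 1 decomposition used above ---
def _decompose(n: int) -> tuple:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

assert _decompose(5_6_1) == (3_5, 4)  # 561 - 1 == 35 * 2**4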
def A__ ( ) -> None:
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 262 | 1 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )]
__UpperCAmelCase : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(UpperCamelCase ) <= key:
return input_string
for position, character in enumerate(UpperCamelCase ):
__UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : List[str] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = ["".join(UpperCamelCase ) for row in temp_grid]
__UpperCAmelCase : Any = "".join(UpperCamelCase )
return output_string
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
__UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )] # generates template
for position in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Optional[int] = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : str = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
__UpperCAmelCase : Union[str, Any] = 0
for row in temp_grid: # fills in the characters
__UpperCAmelCase : Tuple = input_string[counter : counter + len(UpperCamelCase )]
grid.append(list(UpperCamelCase ) )
counter += len(UpperCamelCase )
__UpperCAmelCase : List[str] = "" # reads as zigzag
for position in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : Union[str, Any] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _UpperCamelCase ( UpperCamelCase ) -> dict[int, str]:
"""simple docstring"""
__UpperCAmelCase : Tuple = {}
for key_guess in range(1 , len(UpperCamelCase ) ): # tries every key
__UpperCAmelCase : str = decrypt(UpperCamelCase , UpperCamelCase )
return results
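# --- readable sketch (ours): all three obfuscated functions above share this
# --- zigzag row pattern; encryption writes along it, decryption reads it back,
# --- and the brute force tries every key.
def _zigzag_rows(length: int, key: int) -> list:
    lowest = key - 1
    return [min(p % (lowest * 2), lowest * 2 - p % (lowest * 2)) for p in range(length)]

assert _zigzag_rows(7, 3) == [0, 1, 2, 1, 0, 1, 2]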
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 | from pathlib import Path
import fire
def lowerCAmelCase_ ( lowercase: str , lowercase: str , lowercase: int ) -> int:
'''simple docstring'''
_UpperCamelCase: Any = Path(lowercase )
_UpperCamelCase: int = Path(lowercase )
dest_dir.mkdir(exist_ok=lowercase )
for path in src_dir.iterdir():
_UpperCamelCase: List[str] = [x.rstrip() for x in list(path.open().readlines() )][:n]
_UpperCamelCase: int = dest_dir.joinpath(path.name )
print(lowercase )
dest_path.open('''w''' ).write('''\n'''.join(lowercase ) )
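# --- example invocation (ours; fire exposes the three positional arguments,
# --- script and directory names are illustrative) ---
# python minify.py ./raw_data ./mini_data 128   # keep the first 128 lines of each file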
if __name__ == "__main__":
fire.Fire(minify) | 271 | 0 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> bool:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCamelCase = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCamelCase = True
if a[i].islower():
_lowerCamelCase = True
return dp[n][m]
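# --- illustrative checks (ours): "daBcd" matches "ABC" by upper-casing 'a' and
# --- 'c' and deleting the leftover lower-case 'd's; "dBcd" cannot supply the 'A'.
assert lowerCAmelCase_("daBcd", "ABC")
assert not lowerCAmelCase_("dBcd", "ABC")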
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
a_ : Optional[Any] = """http://www.mocksite.com/file1.txt"""
a_ : List[str] = """\"text\": [\"foo\", \"foo\"]"""
a_ : Optional[Any] = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class __UpperCamelCase :
lowercase : int =2_00
lowercase : Union[str, Any] ={'Content-Length': '100'}
lowercase : Optional[int] ={}
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return [bytes(lowerCAmelCase, '''utf-8''' )]
def a_ ( *__snake_case : Optional[int] , **__snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def a_ ( __snake_case : Any , __snake_case : int , __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
import requests
monkeypatch.setattr(__snake_case , '''request''' , __snake_case )
lowerCamelCase_ =URL
if issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =url
elif issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =[url]
elif issubclass(__snake_case , __snake_case ):
lowerCamelCase_ ={'''train''': url}
lowerCamelCase_ ='''dummy'''
lowerCamelCase_ ='''downloads'''
lowerCamelCase_ =tmp_path
lowerCamelCase_ =DownloadConfig(
cache_dir=os.path.join(__snake_case , __snake_case ) , use_etag=__snake_case , )
lowerCamelCase_ =DownloadManager(dataset_name=__snake_case , download_config=__snake_case )
lowerCamelCase_ =dl_manager.download(__snake_case )
lowerCamelCase_ =urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =[downloaded_paths]
lowerCamelCase_ =[urls]
elif isinstance(__snake_case , __snake_case ):
assert "train" in downloaded_paths.keys()
lowerCamelCase_ =downloaded_paths.values()
lowerCamelCase_ =urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__snake_case , __snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase_ =Path(__snake_case )
lowerCamelCase_ =downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase_ =downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase_ =downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
lowerCamelCase_ =json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def a_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =str(__snake_case )
if issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =filename
elif issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =[filename]
elif issubclass(__snake_case , __snake_case ):
lowerCamelCase_ ={'''train''': filename}
lowerCamelCase_ ='''dummy'''
lowerCamelCase_ =xz_file.parent
lowerCamelCase_ ='''extracted'''
lowerCamelCase_ =DownloadConfig(
cache_dir=__snake_case , use_etag=__snake_case , )
lowerCamelCase_ =DownloadManager(dataset_name=__snake_case , download_config=__snake_case )
lowerCamelCase_ =dl_manager.extract(__snake_case )
lowerCamelCase_ =paths
for extracted_paths in [extracted_paths]:
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =[extracted_paths]
lowerCamelCase_ =[paths]
elif isinstance(__snake_case , __snake_case ):
assert "train" in extracted_paths.keys()
lowerCamelCase_ =extracted_paths.values()
lowerCamelCase_ =paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__snake_case , __snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase_ =Path(__snake_case )
lowerCamelCase_ =extracted_path.parts
assert parts[-1] == hash_url_to_filename(__snake_case , etag=__snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase_ =extracted_path.read_text()
lowerCamelCase_ =text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
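

# Usage sketch (outside the tests): the same DownloadConfig/DownloadManager pair
# drives real dataset scripts. The URL is a placeholder and the cache layout is an
# assumption that depends on the installed `datasets` version.
def _example_download_usage(tmp_path):
    download_config = DownloadConfig(cache_dir=os.path.join(tmp_path, "downloads"), use_etag=False)
    dl_manager = DownloadManager(dataset_name="dummy", download_config=download_config)
    return dl_manager.download_and_extract("https://example.com/archive.tar.gz")  # placeholder URL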
| 676 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding SDE scheduler (predictor-corrector sampling)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
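

# Usage sketch: the predictor-corrector loop this scheduler is built for (it mirrors
# diffusers' ScoreSdeVePipeline). `score_model` is a random-noise stand-in for a
# trained score network, and the sample shape is illustrative only.
def _example_pc_sampling(num_inference_steps=10):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    sample = randn_tensor((1, 3, 32, 32)) * scheduler.init_noise_sigma

    def score_model(x, t):  # placeholder for a trained score network
        return torch.randn_like(x)

    for t in scheduler.timesteps:
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
        sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample
    return sample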
| 676 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 706 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peak (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
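

# Demo sketch: a 1 kHz low-pass at a 48 kHz sample rate, fed samples one at a time.
# Assumes IIRFilter.process(sample: float) -> float from audio_filters.iir_filter.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    print([round(lowpass.process(sample), 6) for sample in (0.0, 1.0, 0.5, -0.5)])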
| 348 | 0 |
"""simple docstring"""
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = [
[],
[],
[],
]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(lowerCAmelCase_ )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self : int ) -> str:
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class UpperCamelCase_ :
def __init__( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = []
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
UpperCAmelCase_ : Union[str, Any] = min(self.queue )
self.queue.remove(lowerCAmelCase_ )
return data
def __str__( self : Dict ) -> str:
return str(self.queue )
def snake_case ( ):
UpperCAmelCase_ : Dict = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,1_00 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,1_28 )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def snake_case ( ):
UpperCAmelCase_ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
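

# For comparison: ElementPriorityQueue.dequeue costs O(n) per call (min + remove).
# A binary heap keeps enqueue and dequeue at O(log n); a minimal stdlib sketch:
import heapq


class HeapPriorityQueue:
    def __init__(self) -> None:
        self.heap = []

    def enqueue(self, data: int) -> None:
        heapq.heappush(self.heap, data)

    def dequeue(self) -> int:
        if not self.heap:
            raise UnderFlowError("The queue is empty")
        return heapq.heappop(self.heap)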
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 95 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=5_0257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
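

def _check_custom_unfold():
    # Sanity sketch: custom_unfold should agree with torch.Tensor.unfold. Here the
    # last dimension of a (3, 4) tensor is split into windows of size 2 with step 2.
    import torch

    x = torch.arange(12).reshape(3, 4)
    assert torch.equal(custom_unfold(x, dimension=1, size=2, step=2), x.unfold(1, 2, 2))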
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    # ONNX-exportable way to find the largest divisor of `seq_length` smaller than
    # `window_size`, plus the resulting number of blocks.
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
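

def _example_onnx_export(output_path):
    # Export sketch: wiring GPTNeoOnnxConfig into transformers' ONNX exporter. The
    # `transformers.onnx.export` entry point has shifted across library versions,
    # so treat the exact call shape below as illustrative rather than pinned.
    from pathlib import Path

    from transformers import AutoTokenizer, GPTNeoModel
    from transformers.onnx import export

    model = GPTNeoModel.from_pretrained("EleutherAI/gpt-neo-1.3B")
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
    onnx_config = GPTNeoOnnxConfig(model.config)
    return export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path(output_path))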
| 511 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 1_00


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 1_00 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 718 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
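    # Worked example (verified by tracing the recursion above): the longest
    # non-decreasing subsequence keeps 6 of the 9 elements.
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # prints [10, 22, 33, 41, 60, 80]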
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
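

# Minimal usage sketch (not part of the upstream module): instantiate with defaults
# and derive the per-frame patch count the patch embedding would use.
if __name__ == "__main__":
    config = TimesformerConfig()
    patches_per_frame = (config.image_size // config.patch_size) ** 2  # 14 * 14 = 196
    print(config.model_type, config.num_frames, patches_per_frame)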
| 683 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ log output
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace all `torch.nn.Linear` modules with `bitsandbytes` 8-bit or 4-bit linear layers."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
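

def _example_8bit_quantization(model, weights_location):
    # Usage sketch for the helpers above: quantize a meta-initialized model to 8-bit
    # from a checkpoint folder. Requires a CUDA device; `weights_location` must point
    # at saved weights for `model`.
    bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
    return load_and_quantize_model(
        model,
        bnb_quantization_config=bnb_quantization_config,
        weights_location=weights_location,
        device_map="auto",
    )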
| 701 |
"""Highest Response Ratio Next (HRRN) scheduling."""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Skip processes that have already finished.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate each process's waiting time: turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
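    # HRRN selects, among the ready processes, the one with the highest response
    # ratio (waiting_time + burst_time) / burst_time: short jobs are favored, but a
    # long job's ratio grows while it waits, so starvation is avoided. E.g. after
    # waiting 6 time units, a job with burst time 3 has ratio (6 + 3) / 3 = 3.0.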
| 483 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
lowerCAmelCase_ = LlamaForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = LlamaForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase , )
lowerCAmelCase_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and next attention mask
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0]
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
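        # With a correct KV cache, running only the new tokens with past_key_values
        # must reproduce the hidden states of a full forward pass over the
        # concatenated sequence, which is what the slice comparison above asserts.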
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.prepare_config_and_inputs()
        lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __a , __a , __a , unittest.TestCase ):
__A : str = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__A : Tuple = (LlamaForCausalLM,) if is_torch_available() else ()
__A : str = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : str = False
__A : Optional[int] = False
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = LlamaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = input_dict['''input_ids''']
lowerCAmelCase_ = input_ids.ne(1 ).to(_lowerCamelCase )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = '''single_label_classification'''
lowerCAmelCase_ = input_dict['''input_ids''']
lowerCAmelCase_ = input_ids.ne(1 ).to(_lowerCamelCase )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = '''multi_label_classification'''
lowerCAmelCase_ = input_dict['''input_ids''']
lowerCAmelCase_ = input_ids.ne(1 ).to(_lowerCamelCase )
lowerCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ = LlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = LlamaModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
lowerCAmelCase_ = original_model(_lowerCamelCase ).last_hidden_state
lowerCAmelCase_ = original_model(_lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase_ = LlamaModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
lowerCAmelCase_ = scaled_model(_lowerCamelCase ).last_hidden_state
lowerCAmelCase_ = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-5 ) )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCamelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase_ = model(torch.tensor(_lowerCamelCase ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCamelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase_ = model(torch.tensor(_lowerCamelCase ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCamelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase_ = model(torch.tensor(_lowerCamelCase ) )
lowerCAmelCase_ = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , _lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase_ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCamelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase_ = '''Simply put, the theory of relativity states that '''
lowerCAmelCase_ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , return_tensors='''pt''' )
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=_lowerCamelCase )
# greedy generation outputs
lowerCAmelCase_ = model.generate(_lowerCamelCase , max_new_tokens=64 , top_p=_lowerCamelCase , temperature=1 , do_sample=_lowerCamelCase )
lowerCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 274 | '''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A_ : Optional[Any] =DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A_ : Tuple ='''main'''
# Default branch name
A_ : Dict ='''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
A_ : int ='''aaaaaaa'''
# This commit does not exist, so we should 404.
A_ : List[str] ='''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
A_ : List[str] ='''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def snake_case_ ( ) -> Tuple:
print('''Welcome!''')
yield
print('''Bye!''')
@contextlib.contextmanager
def snake_case_ ( ) -> str:
print('''Bonjour!''')
yield
print('''Au revoir!''')
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def UpperCAmelCase_ ( self ):
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] )
class __UpperCAmelCase ( __a ):
pass
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
@require_tf
def UpperCAmelCase_ ( self ):
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] )
class __UpperCAmelCase ( __a ):
pass
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
@require_flax
def UpperCAmelCase_ ( self ):
# Flax models don't have labels
self.assertEqual(find_labels(_lowerCamelCase ) , [] )
self.assertEqual(find_labels(_lowerCamelCase ) , [] )
self.assertEqual(find_labels(_lowerCamelCase ) , [] )
class __UpperCAmelCase ( __a ):
pass
self.assertEqual(find_labels(_lowerCamelCase ) , [] )
| 274 | 1 |
"""simple docstring"""
def longest_distance(graph ) -> None:
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
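# Hand-traced expectation for the graph above: the longest chain of vertices is
# 0 -> 2 -> 5 -> 6 -> 7 (five vertices), so the call should print 5; note that
# long_dist counts vertices on the path, not edges.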
| 342 | """simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("""Enter image url: """).strip()
    print(F'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, """html.parser""")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
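    # For reference, the Open Graph tag being scraped here typically looks like
    # (illustrative HTML, not taken from any particular page):
    #   <meta property="og:image" content="https://example.com/picture.jpg" />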
    image_data = requests.get(image_url).content
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 342 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ :Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ :int = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ :int = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ :Union[str, Any] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ :Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCamelCase__ :int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
def heaps(arr ) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k , arr ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0: # k is even
                arr[i] , arr[k - 1] = arr[k - 1], arr[i]
            else: # k is odd
                arr[0] , arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
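# Quick sanity check (illustrative): heaps([1, 2, 3]) yields all 3! = 6
# permutations of the input, e.g. (1, 2, 3), (2, 1, 3) and (3, 1, 2).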
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 87 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
_lowerCamelCase : Dict = """efficientformer"""
def __init__( self , _SCREAMING_SNAKE_CASE = [3, 2, 6, 4] , _SCREAMING_SNAKE_CASE = [48, 96, 224, 448] , _SCREAMING_SNAKE_CASE = [True, True, True, True] , _SCREAMING_SNAKE_CASE = 448 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 7 , _SCREAMING_SNAKE_CASE = 5 , _SCREAMING_SNAKE_CASE = 8 , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1E-5 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.0_2 , _SCREAMING_SNAKE_CASE = 1E-12 , _SCREAMING_SNAKE_CASE = 224 , _SCREAMING_SNAKE_CASE = 1E-05 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = hidden_sizes
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = num_channels
a_ = depths
a_ = mlp_expansion_ratio
a_ = downsamples
a_ = dim
a_ = key_dim
a_ = attention_ratio
a_ = resolution
a_ = pool_size
a_ = downsample_patch_size
a_ = downsample_stride
a_ = downsample_pad
a_ = drop_path_rate
a_ = num_metaad_blocks
a_ = distillation
a_ = use_layer_scale
a_ = layer_scale_init_value
a_ = image_size
a_ = batch_norm_eps | 403 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_A = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
a_ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
a_ = json.loads(UpperCamelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
a_ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
a_ = json.loads(UpperCamelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , UpperCamelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
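# Illustrative environment values that would satisfy the checks above
# (hypothetical, not taken from a real SageMaker job):
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'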
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
_lowerCamelCase : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def __magic_name__ ( self ):
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )
@cached_property
def __magic_name__ ( self ):
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
a_ = torch.device("""cpu""" )
a_ = 0
elif is_sagemaker_model_parallel_available():
a_ = smp.local_rank()
a_ = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
a_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
a_ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
a_ = torch.device("""cuda""" , self.local_rank )
a_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a_ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
a_ = torch.device("""cuda""" , self.local_rank )
a_ = 1
if device.type == "cuda":
torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
return device
@property
def __magic_name__ ( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __magic_name__ ( self ):
return not is_sagemaker_model_parallel_available()
@property
def __magic_name__ ( self ):
return False | 403 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Dict = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase_ ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    '''Approximate y(x) for y' = ode_func(x, y) with y(xa) = ya, using Heun's method.'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor step (explicit Euler), then trapezoidal corrector
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
        )
        x += step_size
    return y
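# One hand-checked step of the scheme above (assuming ode_func(x, y) = y,
# ya = 1.0, xa = 0.0, step_size = 0.2): the predictor gives 1 + 0.2 * 1 = 1.2
# and the corrector gives 1 + 0.1 * (1 + 1.2) = 1.22, close to the exact value
# e**0.2 ~= 1.2214.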
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = self.vocab_size - 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = OpenAIGPTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , head_mask=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = OpenAIGPTLMHeadModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = OpenAIGPTDoubleHeadsModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = OpenAIGPTForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase__ : List[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase__ : Union[str, Any] = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
_lowerCamelCase = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ , )
_lowerCamelCase = inputs_dict['''labels''']
_lowerCamelCase = inputs_dict['''labels''']
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCamelCase__ , )
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def snake_case__ ( self ):
_lowerCamelCase = OpenAIGPTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , n_embd=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = OpenAIGPTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCamelCase__ ) # the president is
_lowerCamelCase = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_lowerCamelCase = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase__ ) | 713 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
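# A quick worked example (hypothetical values): with resistance=3, reactance=4
# and impedance=0, the function returns {"impedance": 5.0}, the classic 3-4-5
# right triangle, since sqrt(3**2 + 4**2) = 5.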
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : List[str]=30 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : Any=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=10 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : int=2 , ) -> Optional[int]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =is_training
__UpperCAmelCase =use_labels
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =scope
__UpperCAmelCase =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__UpperCAmelCase =(image_size // patch_size) ** 2
__UpperCAmelCase =num_patches + 2
def _a ( self : Optional[int] ) -> str:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
__UpperCAmelCase =DeiTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
__UpperCAmelCase =DeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =DeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Any:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =DeiTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =DeiTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Any ) -> Optional[Any]:
__UpperCAmelCase =self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase : Optional[int] = False
lowerCamelCase : Dict = False
lowerCamelCase : List[str] = False
def _a ( self : str ) -> List[str]:
__UpperCAmelCase =DeiTModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _a ( self : Optional[int] ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _a ( self : List[str] ) -> Tuple:
pass
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ) -> int:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : int ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase =super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : Union[str, Any] ) -> str:
if not self.model_tester.is_training:
return
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__SCREAMING_SNAKE_CASE )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__UpperCAmelCase =self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _a ( self : Optional[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCAmelCase =False
__UpperCAmelCase =True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__UpperCAmelCase =self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO test image of two cats used to spot-check inference results."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 68 |
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
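

# Minimal usage sketch (hypothetical sizes, for illustration only): project a batch of
# 768-dim hidden states onto 5 class logits.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    print(head(torch.randn(2, 768)).shape)  # torch.Size([2, 5])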
| 509 | 0 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the numbers below max_number that are the product of exactly two primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
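

# Worked example: below 30 the two-prime products are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, i.e. ten numbers in total.
assert solution(30) == 10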
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split the shard indices [0, num_shards) into at most max_num_jobs contiguous groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs dicts, distributing the list values across them."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of gen_kwargs back into a single dict, concatenating the list values."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the lists in gen_kwargs; lists of the same length get the same permutation so they stay aligned."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
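

# Minimal illustration (hypothetical file names, for demonstration only):
if __name__ == "__main__":
    kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt", "e.txt"], "split": "train"}
    # 5 shards split over 2 jobs -> [range(0, 3), range(3, 5)]
    print(_distribute_shards(num_shards=5, max_num_jobs=2))
    # each job gets its own slice of the 'files' list; the 'split' string is copied as-is
    print(_split_gen_kwargs(kwargs, max_num_jobs=2))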
| 214 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """Dynamic programming: minimal scalar multiplications needed to chain-multiply matrices."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization recorded in the solution matrix."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """Example run for matrices of sizes 30x35, 35x15, 15x5, 5x10, 10x20 and 20x25."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
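

# The recurrence solved by matrix_chain_order, for reference:
#   matrix[a][b] = min over c in [a, b) of matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# e.g. for array = [10, 20, 30] (a 10x20 matrix times a 20x30 matrix) the only split
# costs 10 * 20 * 30 = 6000 scalar multiplications.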
| 610 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
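

# Quick illustration of the helpers above (names invented for the demo):
# camelcase_to_snakecase("SomeDatasetName")   -> "some_dataset_name"
# snakecase_to_camelcase("some_dataset_name") -> "SomeDatasetName"
# filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[3, 3])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]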
| 610 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data payloads of the two nodes
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
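
# Note: swap_nodes exchanges the *data* payloads of the two matching nodes; the node
# objects and their links stay in place, so the swap is an O(n) search plus O(1) work.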
| 179 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
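
    # For reference, the exact GELU compared above is
    #   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    # while "gelu_new" uses the tanh approximation
    #   0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
    # which is why allclose succeeds against the torch builtin but fails against gelu_new.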
    def test_get_activation(self):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 179 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
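
# With the _LazyModule pattern above, importing this package only records the import
# structure; heavyweight submodules (and torch/TF themselves) are loaded lazily, on the
# first attribute access such as `ConvBertModel`.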
| 62 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compile wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, using the TPU backend where needed, otherwise only on the local main process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that temporarily sets environment variables (keys are upper-cased)."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object: its qualified name, its name, or its string form."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None) -> bool:
    """Check whether a TCP port on localhost is already in use (defaults to 29500)."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
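

# Demo of patch_environment (hypothetical variable name, for illustration only):
if __name__ == "__main__":
    with patch_environment(my_temp_var="42"):
        assert os.environ["MY_TEMP_VAR"] == "42"
    assert "MY_TEMP_VAR" not in os.environ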
| 596 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    """Pop a key from the state dict and re-insert it under its new name."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move the backbone weights under the HF conv_encoder naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split the fused encoder self-attention in_proj weights into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
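

# Background for the slicing above: PyTorch's nn.MultiheadAttention stores q, k and v as a
# single stacked in_proj matrix of shape (3 * hidden_size, hidden_size); with hidden_size = 256
# the slices [:256], [256:512] and [-256:] recover the separate q/k/v projections used by HF.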
def prepare_img():
    """Download the COCO image of two cats on which the conversion is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our CONDITIONAL_DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
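
# Typical invocation (script name and output path are illustrative):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50_hf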
| 711 |
'''simple docstring'''
def binary_count_setbits(a: int) -> int:
    """Take a positive integer and return the number of set bits (1s) in its binary representation."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
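

# Example: 25 is 0b11001, so three bits are set.
assert binary_count_setbits(25) == 3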
if __name__ == "__main__":
import doctest
doctest.testmod()
| 172 | 0 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left to the bottom-right corner of the maze."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: mark (i, j) as visited and try the four neighbours."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
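
# Example: a 3x3 maze where 0 is a free cell and 1 is a wall; the only route hugs the
# left and bottom edges, so solve_maze returns True and prints the visited cells.
#   solve_maze([[0, 1, 0],
#               [0, 1, 0],
#               [0, 0, 0]])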
| 520 | import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Builds configs and random inputs for the SpeechaText feature-extraction tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
            speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
__A : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__A : Optional[int] = ["longest", "max_length", "do_not_pad"]
__A : int = [None, 16, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
__A : List[Any] = feature_extractor(
__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase )
__A : Dict = inputs.input_features
__A : str = inputs.attention_mask
__A : Union[str, Any] = [np.sum(__UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_np(self):
__A : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__A : List[Any] = ["longest", "max_length", "do_not_pad"]
__A : Tuple = [None, 16, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
__A : Optional[int] = feature_extractor(
__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase )
__A : Any = inputs.input_features
__A : Any = inputs.attention_mask
__A : Dict = [np.sum(__UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
__A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__A : Dict = feature_extractor(
__UpperCAmelCase , padding="max_length" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
__A : List[str] = inputs.input_features
__A : Union[str, Any] = inputs.attention_mask
__A : str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
__A : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__A : Optional[Any] = feature_extractor(
__UpperCAmelCase , padding="longest" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
__A : Optional[Any] = inputs.input_features
__A : Dict = inputs.attention_mask
__A : int = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__A : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__A : Optional[int] = feature_extractor(
__UpperCAmelCase , padding="longest" , max_length=16 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
__A : List[str] = inputs.input_features
__A : str = inputs.attention_mask
__A : Dict = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
            -1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
            -1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 520 | 1 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
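

# Round-trip example: "Hi!" is the bytes 0x48 0x69 0x21.
assert base16_encode(b"Hi!") == "486921"
assert base16_decode("486921") == b"Hi!"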
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236 | import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification head from an s3prl downstream checkpoint."""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification (diarization) head from an s3prl checkpoint."""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an x-vector (speaker verification) head from an s3prl checkpoint."""
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
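
# Illustrative invocation (script name and all paths are hypothetical):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model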
| 236 | 1 |
'''simple docstring'''
def solution():
    """Find the last ten digits of 1^1 + 2^2 + 3^3 + ... + 1000^1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
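

# The last ten digits equal sum(i**i for i in range(1, 1001)) % 10**10; slicing the
# string keeps any leading zeros that the modulo form would drop.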
if __name__ == "__main__":
print(solution())
| 128 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 128 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification head from an s3prl downstream checkpoint."""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification (diarization) head from an s3prl checkpoint."""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an x-vector (speaker verification) head from an s3prl checkpoint."""
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 486 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
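# Each test above round-trips weights across frameworks: the TF model is instantiated
# from PyTorch weights (from_pt=True) and the PyTorch model from TF weights (from_tf=True).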
| 486 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
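# Minimal usage sketch (the sample text is illustrative, not from the original file):
#   summarizer = TextSummarizationTool()
#   print(summarizer("A long meeting transcript to condense ..."))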
| 525 |
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
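# Gnome sort is O(n^2) in the worst case and O(n) on already-sorted input; it swaps
# adjacent out-of-order elements and steps backwards, using no auxiliary memory.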
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 239 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def __lowercase ( _a , _a , _a , _a="ybelkada/segment-anything" ):
snake_case_ : Optional[Any] = hf_hub_download(_a , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
snake_case_ : Tuple = SamConfig()
elif "sam_vit_l" in model_name:
snake_case_ : Optional[Any] = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case_ : Union[str, Any] = SamConfig(
vision_config=_a , )
elif "sam_vit_h" in model_name:
snake_case_ : Tuple = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case_ : List[str] = SamConfig(
vision_config=_a , )
snake_case_ : Tuple = torch.load(_a , map_location='''cpu''' )
snake_case_ : Optional[Any] = replace_keys(_a )
snake_case_ : Any = SamImageProcessor()
snake_case_ : Optional[Any] = SamProcessor(image_processor=_a )
snake_case_ : Tuple = SamModel(_a )
hf_model.load_state_dict(_a )
snake_case_ : Tuple = hf_model.to('''cuda''' )
snake_case_ : Union[str, Any] = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
snake_case_ : Union[str, Any] = Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' )
snake_case_ : Tuple = [[[400, 650]]]
snake_case_ : List[str] = [[1]]
snake_case_ : Optional[int] = processor(images=np.array(_a ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Optional[Any] = hf_model(**_a )
snake_case_ : Any = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
snake_case_ : Optional[Any] = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Optional[Any] = hf_model(**_a )
snake_case_ : Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
snake_case_ : Tuple = ((75, 275, 1_725, 850),)
snake_case_ : Optional[Any] = processor(images=np.array(_a ) , input_boxes=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Dict = hf_model(**_a )
snake_case_ : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
snake_case_ : Union[str, Any] = [[[400, 650], [800, 650]]]
snake_case_ : Optional[int] = [[1, 1]]
snake_case_ : Tuple = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
snake_case_ : Dict = hf_model(**_a )
snake_case_ : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
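# Note: the sanity-check forward passes above run on CUDA, so a GPU is required, and
# the hard-coded IoU scores are only asserted for the sam_vit_h_4b8939 checkpoint.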
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 485 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    # the "tax_model" name is kept because the body below refers to it throughout
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
snake_case_ : str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ : List[str] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Dict = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
snake_case_ : Any = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
snake_case_ : Optional[Any] = flax_model.params['''encoder''']['''block'''][str(_a )]['''layer''']
snake_case_ : List[str] = tax_attention_key
snake_case_ : Optional[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : str = tax_attention_value
snake_case_ : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
snake_case_ : Any = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : Union[str, Any] = tax_mlp_wi
snake_case_ : List[Any] = tax_mlp_wo
snake_case_ : int = tax_mlp_layer_norm
snake_case_ : Any = flax_model_encoder_layer_block
# Only for layer 0:
snake_case_ : Optional[int] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
snake_case_ : Tuple = tax_encoder_global_rel_embedding
# Assigning
snake_case_ : Dict = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
snake_case_ : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case_ : Tuple = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
snake_case_ : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
snake_case_ : Optional[Any] = tax_enc_dec_attention_module['''key''']['''kernel''']
snake_case_ : str = tax_enc_dec_attention_module['''out''']['''kernel''']
snake_case_ : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
snake_case_ : List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
snake_case_ : Dict = flax_model.params['''decoder''']['''block'''][str(_a )]['''layer''']
snake_case_ : int = tax_attention_key
snake_case_ : List[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : Dict = tax_attention_value
snake_case_ : str = tax_pre_attention_layer_norm
snake_case_ : Any = tax_enc_dec_attention_key
snake_case_ : str = tax_enc_dec_attention_out
snake_case_ : int = tax_enc_dec_attention_query
snake_case_ : Any = tax_enc_dec_attention_value
snake_case_ : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
snake_case_ : Tuple = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : List[Any] = tax_mlp_wi
snake_case_ : Dict = tax_mlp_wo
snake_case_ : List[Any] = txa_mlp_layer_norm
snake_case_ : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
snake_case_ : str = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
snake_case_ : Tuple = txa_decoder_norm
# Only for layer 0:
snake_case_ : str = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
snake_case_ : Union[str, Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
snake_case_ : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(_a )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 485 | 1 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
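    # The <0x..> pieces above come from SentencePiece byte fallback: characters absent
    # from the vocab (such as "é") are encoded as their raw UTF-8 bytes.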
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected in zip(texts, expected_ids):
            self.assertListEqual(tokenizer.encode_fast(text), expected)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def _UpperCAmelCase ( self : Optional[Any] ):
_a = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_a = {'input_ids': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=SCREAMING_SNAKE_CASE_ , )
| 562 |
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
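# Worked example (values are illustrative): a fully submerged 0.00052 m^3 object in
# water (997 kg/m^3) under standard gravity feels 997 * 0.00052 * 9.80665 ≈ 5.08 N.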
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 562 | 1 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
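# Note: tf.Session / tf.get_variable are TF1-style graph APIs; this writer assumes
# TensorFlow 1.x (or tf.compat.v1 aliased as tf).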
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main() | 93 | """simple docstring"""
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
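# Example: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is a Krishnamurthy number.
# In base 10 the only such numbers are 1, 2, 145 and 40585.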
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
__lowercase : Optional[int] = int(input("Enter number: ").strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
) | 93 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
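# The retriever reports, for each retrieved block, whether the answer text occurs in
# it plus token-level start/end positions within the concatenated question+block input.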
| 138 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
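# "wanted_data" may only request fields Reddit actually returns for a post; any term
# outside the set above makes get_subreddit_data raise ValueError before the request.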
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    """Fetch posts from a subreddit; ``age`` should be one of "new", "top" or "hot"."""
    wanted_data = wanted_data or []

    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)

    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""])) | 716 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : Union[str, Any] = BitModelTester(self ) | 17 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 36 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=_a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCAmelCase_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
lowerCAmelCase_ = CLIPTextModel(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = CLIPTextModelWithProjection(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.7_5,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_save_load_optional_components( self ):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_2_base( self ):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 122 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
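# Illustrative note (added, not in the original script): with an X gate applied
# to qubits 0 and 1 before measurement, every shot collapses to |11>, so the
# call in the __main__ block below is expected to report {'11': 1000}.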
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"Total count for various states are: {counts}")
| 506 |
'''simple docstring'''
def split( string: str , separator: str = " " ) -> list:
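    """Split ``string`` on ``separator``; illustrative doctests added for the testmod call below.

    >>> split("apple#banana#cherry", separator="#")
    ['apple', 'banana', 'cherry']
    >>> split("Hello world")
    ['Hello', 'world']
    """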
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 506 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 466 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class UpperCamelCase_ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
    def test_inference( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_cpu_offload_forward_pass( self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_pt_np_pil_outputs_equivalent( self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=5E-4 )
    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ):
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to('cuda' )
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 364 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE_ ( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f'Loading a tokenizer inside {cls.__name__} from a config that does not'
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs
    def pad( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["""input_ids"""]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False | 705 |
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
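# Illustrative examples (added, not in the original): "0712345678",
# "+94712345678" and "0094712345678" all match the pattern above, while the
# nine-digit "071234567" does not.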
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
"""simple docstring"""
def snake_case ( word ):
    return "".join(chr(ord(char ) - 32 ) if "a" <= char <= "z" else char for char in word )
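# Illustrative note (added): this shifts ASCII lowercase code points down by 32,
# e.g. snake_case("hello world") == "HELLO WORLD"; non-letters pass through unchanged.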
if __name__ == "__main__":
from doctest import testmod
testmod()
| 95 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F'{key}\n{value}\n')
| 278 | 0 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product: str = "laptop" ) -> DataFrame:
    url = F'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 1_00 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ' '
        product_mrp = ' '
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
lowerCamelCase = """headphones"""
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 716 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.99_9995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger( model_args: ModelArguments , training_args: TrainingArguments ):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: str = field(
        default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining:
    '''simple docstring'''
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self , features ):
        """simple docstring"""
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch["input_values"].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer ( Trainer ):
    '''simple docstring'''
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model , inputs ):
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
| 207 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ):
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 675 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_keys():
    rouge_keys = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=rouge_keys )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=rouge_keys )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] , newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score
def test_calculate_rouge_path():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro" )
    metrics = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
| 675 | 1 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data: dict )-> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost( features: np.ndarray , target: np.ndarray )-> XGBClassifier:
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
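# Illustrative usage sketch (added; the toy arrays below are placeholders, not
# from the original script):
#   clf = xgboost(np.array([[0.0], [1.0], [2.0], [3.0]]), np.array([0, 0, 1, 1]))
#   clf.predict(np.array([[0.5]]))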
def main()-> None:
    '''simple docstring'''
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.2_5 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 708 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 135 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
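# Illustrative check (added): the trapezoid rule is exact for linear integrands,
# so trapezoidal_area(lambda x: x, 0, 1, steps) returns 0.5 (up to float
# rounding) for any positive number of steps.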
if __name__ == "__main__":
    def f( x ):
        """simple docstring"""
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 100_000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 77 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A = """
Human: <<task>>
Assistant: """
A = """huggingface-tools/default-prompts"""
A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt( prompt_or_repo_id , agent_name , mode="run" ):
    """simple docstring"""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
return f.read()
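# Illustrative usage (added): a string that contains whitespace is treated as
# the prompt itself and returned unchanged, e.g.
#   download_prompt("Human: <<task>>\nAssistant:", agent_name="my-agent")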
| 77 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return (gray > 1_2_7) & (gray <= 2_5_5)
def dilation( image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1 , kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
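# Illustrative example (added, not from the original script): dilating a single
# bright pixel with the cross-shaped kernel used below grows it into the cross:
#   point = np.zeros((3, 3)); point[1, 1] = 1
#   dilation(point, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))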
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 720 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    '''simple docstring'''
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
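# Illustrative example (added): boxes are rescaled to LayoutLM's 0-1000 grid,
# e.g. normalize_box([10, 20, 30, 40], width=100, height=200) == [100, 100, 300, 200].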
def apply_tesseract( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
    '''simple docstring'''
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __magic_name__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
def _A( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase =(size['''height'''], size['''width'''])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
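# Hedged usage sketch (assumes a local "document.png"; not part of the original file):
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor(images=PIL.Image.open("document.png"), return_tensors="np")
#   encoding["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above
#   encoding["words"], encoding["boxes"]  # per-image OCR output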
| 145 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
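# Illustration (added; `model` and `input_ids` are placeholders, mirroring how
# the factory is applied further below):
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
# wraps `forward` in tf.function(experimental_compile=True) for XLA-compiled graph mode.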
def random_input_ids(batch_size, sequence_length, vocab_size) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
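# Illustration (added): random_input_ids(2, 8, 30522) yields a tf.int32 tensor of
# shape (2, 8) with token ids drawn uniformly from [0, 30521]; 30522 here is an
# example vocabulary size, not a value from this file.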
class TensorFlowBenchmark(Benchmark):

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 repetitions to stabilize compilation for TPU
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 26 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
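# Note (added for clarity): `_LazyModule` registers the structure above but defers
# the heavy torch/TF/flax imports until an attribute such as `ElectraModel` is
# first accessed, so importing this subpackage stays cheap.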
| 657 | 0 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 169 |
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of ``number`` ends in ``number`` itself (an automorphic number).

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
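# Reference values (added; easy to verify by hand): the automorphic numbers below
# 1000 are 0, 1, 5, 6, 25, 76, 376 and 625 -- e.g. 625 ** 2 == 390625 ends in 625.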
| 169 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
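    # Note (added): each test below feeds these deterministic 128x128 dummy inputs
    # through the x4 upscaler and compares a corner slice of the 512x512 output
    # against reference values.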
def _UpperCamelCase( self : List[str] ):
a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Optional[Any] = self.get_dummy_inputs()
a__ : Any = pipe(**lowerCamelCase__ ).images
a__ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
a__ : Union[str, Any] = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : List[Any] = self.get_dummy_inputs()
a__ : Optional[Any] = pipe(**lowerCamelCase__ ).images
a__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ : Tuple = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase( self : List[Any] ):
a__ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : List[Any] = self.get_dummy_inputs()
a__ : Dict = pipe(**lowerCamelCase__ ).images
a__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ : Optional[int] = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase( self : List[str] ):
a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Any = self.get_dummy_inputs()
a__ : List[Any] = pipe(**lowerCamelCase__ ).images
a__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ : Optional[int] = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase( self : Any ):
a__ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : int = pipe(**lowerCamelCase__ ).images
a__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a__ : Optional[int] = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # assumption: the obfuscated original set a session flag to False here;
        # disabling memory-pattern optimization is the standard choice in these tests
        options.enable_mem_pattern = False
        return options
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
a__ : int = init_image.resize((128, 128) )
# using the PNDM scheduler by default
a__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Any = "A fantasy landscape, trending on artstation"
a__ : Dict = torch.manual_seed(0 )
a__ : Tuple = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="np" , )
a__ : Any = output.images
a__ : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
a__ : List[str] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
a__ : Optional[Any] = init_image.resize((128, 128) )
a__ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
a__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Optional[int] = "A fantasy landscape, trending on artstation"
a__ : Optional[int] = torch.manual_seed(0 )
a__ : Tuple = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type="np" , )
a__ : Dict = output.images
a__ : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
a__ : Tuple = np.array(
[0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 37 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase( self : List[Any] ):
a__ : int = "<pad>"
a__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase__ ) , 1_008 )
def _UpperCamelCase( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def _UpperCamelCase( self : Optional[int] ):
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
a__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _UpperCamelCase( self : Dict ):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ )
a__ : List[str] = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
if not self.test_rust_tokenizer:
return
a__ : Any = self.get_tokenizer()
a__ : Optional[Any] = self.get_rust_tokenizer()
a__ : Tuple = "I was born in 92000, and this is falsé."
a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : Tuple = tokenizer.encode(lowerCamelCase__ )
a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = "Hello World!"
a__ : List[str] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : List[Any] ):
# fmt: off
a__ : Optional[int] = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
| 37 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
# make sure we normalize float32 arrays
if self.normalize_means:
UpperCamelCase = x[:input_length].mean(axis=0 )
UpperCamelCase = np.subtract(A_ , A_ )
if self.normalize_vars:
UpperCamelCase = x[:input_length].std(axis=0 )
UpperCamelCase = np.divide(A_ , A_ )
if input_length < x.shape[0]:
UpperCamelCase = padding_value
# make sure array is in float32
UpperCamelCase = x.astype(np.floataa )
return x
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[np.ndarray]:
"""simple docstring"""
UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(A_ , A_ , self.padding_value ) for x, n in zip(A_ , A_ )]
def __call__( self , A_ , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
UpperCamelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCamelCase = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase = [raw_speech]
# extract fbank features
UpperCamelCase = [self._extract_mfsc_features(A_ ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase = BatchFeature({'input_features': features} )
UpperCamelCase = self.pad(
A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , )
# make sure list is in array format
UpperCamelCase = padded_inputs.get('input_features' )
if isinstance(input_features[0] , A_ ):
UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase = padded_inputs.get('attention_mask' )
if attention_mask is not None:
UpperCamelCase = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCamelCase = (
np.array(A_ , dtype=np.intaa )
if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCamelCase = self.normalize(
padded_inputs['input_features'] , attention_mask=A_ )
if return_tensors is not None:
UpperCamelCase = padded_inputs.convert_to_tensors(A_ )
return padded_inputs
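# Hedged usage sketch (the synthetic 16 kHz tone is illustrative, not from this file):
#   extractor = MCTCTFeatureExtractor()
#   waveform = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
#   features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   features["input_features"].shape  # (1, num_frames, 80) with the defaults above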
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
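    # Hedged usage sketch (the checkpoint is the one referenced in the maps above):
    #   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    #   enc = tokenizer("a photo of a cat", "what is shown?")
    #   enc["token_type_ids"]  # 0s for the first segment, 1s for the second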
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 516 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
a_ : Dict = input_ids.size()
elif inputs_embeds is not None:
a_ : Dict = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
a_ : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
a_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
a_ : List[str] = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
a_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
a_ , a_ , a_ : int = encoder_hidden_states.size()
a_ : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
a_ : Tuple = torch.ones(_lowercase , device=_lowercase )
a_ : List[Any] = self.invert_attention_mask(_lowercase )
else:
a_ : Optional[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
a_ : List[Any] = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
a_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
a_ : List[Any] = embedding_output
if self.training:
a_ : Any = []
for i in range(self.config.num_hidden_layers ):
a_ : int = self.encoder.adaptive_forward(
_lowercase , current_layer=_lowercase , attention_mask=_lowercase , head_mask=_lowercase )
a_ : List[Any] = self.pooler(_lowercase )
a_ : Optional[int] = output_layers[i](output_dropout(_lowercase ) )
res.append(_lowercase )
elif self.patience == 0: # Use all layers for inference
a_ : Union[str, Any] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
a_ : Union[str, Any] = self.pooler(encoder_outputs[0] )
a_ : List[str] = [output_layers[self.config.num_hidden_layers - 1](_lowercase )]
else:
a_ : Any = 0
a_ : Dict = None
a_ : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
a_ : Optional[Any] = self.encoder.adaptive_forward(
_lowercase , current_layer=_lowercase , attention_mask=_lowercase , head_mask=_lowercase )
a_ : Optional[int] = self.pooler(_lowercase )
a_ : int = output_layers[i](_lowercase )
if regression:
a_ : Dict = logits.detach()
if patient_result is not None:
a_ : Optional[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
a_ : Optional[Any] = 0
else:
a_ : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
a_ : str = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_lowercase ) ):
patient_counter += 1
else:
a_ : Tuple = 0
a_ : Union[str, Any] = logits
if patient_counter == self.patience:
break
a_ : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''', a_, )
class A__(a_ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> str:
super().__init__(_lowercase )
a_ : str = config.num_labels
a_ : Optional[Any] = BertModelWithPabee(_lowercase )
a_ : int = nn.Dropout(config.hidden_dropout_prob )
a_ : str = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Tuple:
a_ : Optional[Any] = self.bert(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
a_ : Optional[Any] = (logits[-1],)
if labels is not None:
a_ : int = None
a_ : Union[str, Any] = 0
for ix, logits_item in enumerate(_lowercase ):
if self.num_labels == 1:
# We are doing regression
a_ : Any = MSELoss()
a_ : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
a_ : Any = CrossEntropyLoss()
a_ : int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
a_ : str = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
a_ : Any = (total_loss / total_weights,) + outputs
return outputs
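# Hedged usage note (added): with the sequence-classification head above, inference
# exits early once `patience` consecutive internal classifiers agree, e.g.
#   model.bert.set_patience(3)
#   model.bert.reset_stats()
#   logits = model(input_ids)[0]
#   model.bert.log_stats()  # prints the average number of executed layers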
| 540 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its source image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
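# Hedged usage sketch (checkpoint and image path are illustrative, not from this file):
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(Image.open("form.png"), return_tensors="pt")
#   # -> input_ids, bbox, attention_mask and pixel_values in a single BatchEncoding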
| 705 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
__UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : int = MaxLengthCriteria(max_length=10 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
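# Illustrative only (not part of the test suite): how the criteria exercised above are
# typically combined when calling `generate`. The "gpt2" checkpoint is an assumption.
def _example_generate_with_stopping_criteria():
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    # generation stops as soon as any criterion in the list fires
    criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
    )
    return model.generate(**inputs, stopping_criteria=criteria)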
| 10 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = BertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(F'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ = BertForPreTraining(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
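# Example invocation (paths are placeholders; the script file name is an assumption):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin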
| 205 | import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self : List[str] , snake_case : int , snake_case : Optional[int]=1_3 , snake_case : List[str]=3_0 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=3 , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=3_2 , snake_case : int=5 , snake_case : int=4 , snake_case : List[str]=3_7 , snake_case : Union[str, Any]="gelu" , snake_case : int=0.1 , snake_case : Dict=0.1 , snake_case : Any=1_0 , snake_case : Any=0.02 , snake_case : int=3 , snake_case : int=0.6 , snake_case : str=None , ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[Any] = parent
UpperCamelCase_ : Optional[int] = batch_size
UpperCamelCase_ : Optional[Any] = image_size
UpperCamelCase_ : Optional[int] = patch_size
UpperCamelCase_ : List[str] = num_channels
UpperCamelCase_ : Optional[int] = is_training
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : str = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : int = num_attention_heads
UpperCamelCase_ : Optional[Any] = intermediate_size
UpperCamelCase_ : Optional[int] = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase_ : List[str] = type_sequence_label_size
UpperCamelCase_ : List[str] = initializer_range
UpperCamelCase_ : Union[str, Any] = mask_ratio
UpperCamelCase_ : List[Any] = scope
 # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
UpperCamelCase_ : int = (image_size // patch_size) ** 2
UpperCamelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
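        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91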
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Tuple = None
if self.use_labels:
UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Dict , snake_case : Optional[int] , snake_case : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ViTMAEModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Tuple , snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Optional[int] = model(snake_case )
UpperCamelCase_ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase_ : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase_ : List[str] = 1
UpperCamelCase_ : Tuple = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ : Optional[Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = config_and_inputs
UpperCamelCase_ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Tuple = ViTMAEModelTester(self )
UpperCamelCase_ : List[str] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[str] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[Any] = model_class(snake_case )
UpperCamelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase_ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple ) -> int:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase_ : Optional[Any] = torch.from_numpy(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase_ : Optional[int] = pt_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(snake_case )
model.to(snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCamelCase_ : Any = outputs[0].cpu().numpy()
UpperCamelCase_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Any = model_class.from_pretrained(snake_case )
model.to(snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
# Make sure we don't have nans
UpperCamelCase_ : int = after_outputs[0].cpu().numpy()
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Tuple = ViTMAEModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __lowercase ( ):
UpperCamelCase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Dict = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
UpperCamelCase_ : str = self.default_image_processor
UpperCamelCase_ : int = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase_ : Optional[Any] = ViTMAEConfig()
UpperCamelCase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase_ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Tuple = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
# verify the logits
UpperCamelCase_ : Optional[int] = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCamelCase_ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
| 417 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =parent
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return {}
def _A ( ):
"""simple docstring"""
__lowercase ='<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
__lowercase ='\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =MarkupLMFeatureExtractionTester(self)
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.feature_extraction_class()
# Test not batched input
__lowercase =get_html_strings()[0]
__lowercase =feature_extractor(_lowerCAmelCase)
# fmt: off
__lowercase =[['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
__lowercase =[['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , _lowerCAmelCase)
self.assertEqual(encoding.xpaths , _lowerCAmelCase)
# Test batched
__lowercase =get_html_strings()
__lowercase =feature_extractor(_lowerCAmelCase)
# fmt: off
__lowercase =expected_nodes + [['My First Heading', 'My first paragraph.']]
__lowercase =expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes) , 2)
self.assertEqual(len(encoding.xpaths) , 2)
self.assertEqual(encoding.nodes , _lowerCAmelCase)
self.assertEqual(encoding.xpaths , _lowerCAmelCase)
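# Illustrative standalone usage (requires beautifulsoup4; the HTML string is an
# assumption for illustration, not one of the fixtures above):
def _example_markuplm_feature_extraction():
    from transformers import MarkupLMFeatureExtractor

    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
    return encoding.nodes, encoding.xpaths  # parallel lists of text nodes and their XPaths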
| 454 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
lowerCamelCase = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = BartTokenizer
def __init__( self : Optional[int] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : str="replace" , _lowerCAmelCase : List[Any]="<s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : Dict="</s>" , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]="<pad>" , _lowerCAmelCase : Dict="<mask>" , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : str=True , **_lowerCAmelCase : Tuple , ):
'''simple docstring'''
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , _lowerCAmelCase) != add_prefix_space:
__lowercase =getattr(_lowerCAmelCase , pre_tok_state.pop('type'))
__lowercase =add_prefix_space
__lowercase =pre_tok_class(**_lowerCAmelCase)
__lowercase =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowercase ='post_processor'
__lowercase =getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase)
if tokenizer_component_instance:
__lowercase =json.loads(tokenizer_component_instance.__getstate__())
 # The lists 'sep' and 'cls' must be cast as tuples for the object `post_processor_class`
if "sep" in state:
__lowercase =tuple(state['sep'])
if "cls" in state:
__lowercase =tuple(state['cls'])
__lowercase =False
if state.get('add_prefix_space' , _lowerCAmelCase) != add_prefix_space:
__lowercase =add_prefix_space
__lowercase =True
if state.get('trim_offsets' , _lowerCAmelCase) != trim_offsets:
__lowercase =trim_offsets
__lowercase =True
if changes_to_apply:
__lowercase =getattr(_lowerCAmelCase , state.pop('type'))
__lowercase =component_class(**_lowerCAmelCase)
setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase)
@property
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase) if isinstance(_lowerCAmelCase , _lowerCAmelCase) else value
__lowercase =value
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : str):
'''simple docstring'''
__lowercase =kwargs.get('is_split_into_words' , _lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =kwargs.get('is_split_into_words' , _lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.')
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None):
'''simple docstring'''
__lowercase =self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase)
return tuple(_lowerCAmelCase)
def __lowerCamelCase ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None):
'''simple docstring'''
__lowercase =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
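        # i.e. a single sequence becomes `<s> A </s>` and a pair becomes
        # `<s> A </s></s> B </s>`, the standard BART/RoBERTa input format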
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None):
'''simple docstring'''
__lowercase =[self.sep_token_id]
__lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 454 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=a__ ):
'''simple docstring'''
lowercase__ : List[str] = ["transformers", "torch", "note_seq"]
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Any:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *lowerCamelCase_ , **lowerCamelCase_ ) -> Tuple:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) | 90 |
"""simple docstring"""
from timeit import timeit
def UpperCAmelCase ( snake_case : int ):
if number < 0:
        raise ValueError('''the input value must not be negative''' )
_lowerCAmelCase:str = 0
while number:
number &= number - 1
result += 1
return result
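# Why `number &= number - 1` works: subtracting 1 flips the lowest set bit to 0 (and the
# zeros below it to 1), so the AND clears exactly one set bit per iteration.
# E.g. for 12 (0b1100): 12 & 11 = 0b1000, then 8 & 7 = 0, so the loop runs twice -> 2.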
def UpperCAmelCase ( snake_case : int ):
if number < 0:
        raise ValueError('''the input value must not be negative''' )
_lowerCAmelCase:Optional[Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase ( ):
def do_benchmark(snake_case : int ) -> None:
_lowerCAmelCase:Optional[int] = '''import __main__ as z'''
print(F'Benchmark when {number = }:' )
print(F'{get_set_bits_count_using_modulo_operator(snake_case ) = }' )
_lowerCAmelCase:List[Any] = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=snake_case )
print(F'timeit() runs in {timing} seconds' )
print(F'{get_set_bits_count_using_brian_kernighans_algorithm(snake_case ) = }' )
_lowerCAmelCase:List[str] = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=snake_case , )
print(F'timeit() runs in {timing} seconds' )
for number in (25, 37, 58, 0):
do_benchmark(snake_case )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 227 | 0 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( a : Optional[int] , a : str=False ):
a__ = OmegaConf.load(a )
if display:
print(yaml.dump(OmegaConf.to_container(a ) ) )
return config
def lowerCAmelCase_ ( a : str , a : Optional[Any]=None , a : Any=None ):
if conf_path is None:
a__ = './model_checkpoints/vqgan_only.yaml'
a__ = load_config(a , display=a )
a__ = VQModel(**config.model.params )
if ckpt_path is None:
a__ = './model_checkpoints/vqgan_only.pt'
a__ = torch.load(a , map_location=a )
if ".ckpt" in ckpt_path:
a__ = sd['state_dict']
model.load_state_dict(a , strict=a )
model.to(a )
del sd
return model
def lowerCAmelCase_ ( a : Union[str, Any] , a : Tuple ):
a__ , a__ , a__ = model.encode(a )
print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
a__ = model.decode(a )
return xrec
def lowerCAmelCase_ ( a : int , a : Any=False ):
a__ , a__ = string.rsplit('.' , 1 )
if reload:
a__ = importlib.import_module(a )
importlib.reload(a )
return getattr(importlib.import_module(a , package=a ) , cls )
def lowerCAmelCase_ ( a : List[str] ):
if "target" not in config:
raise KeyError('Expected key `target` to instantiate.' )
return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
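# The config consumed above is a mapping like (illustrative values, not a checked-in file):
#   {"target": "taming.models.vqgan.VQModel", "params": {...}}
# "target" is a dotted import path resolved by the helper above; "params" are the kwargs
# forwarded to the resolved class.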
def lowerCAmelCase_ ( a : Any , a : Union[str, Any] , a : Optional[int]=True , a : Optional[Any]=True ):
a__ = instantiate_from_config(a )
if sd is not None:
model.load_state_dict(a )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( a : int , a : List[str] , a : Dict , a : List[str] ):
# load the specified checkpoint
if ckpt:
a__ = torch.load(a , map_location='cpu' )
a__ = pl_sd['global_step']
print(f'''loaded model from global step {global_step}.''' )
else:
a__ = {'state_dict': None}
a__ = None
a__ = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=a , eval_mode=a )['model']
return model, global_step
| 719 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : Any = 'pt'
elif is_tf_available():
__A : List[str] = 'tf'
else:
__A : Union[str, Any] = 'jax'
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:str = PerceiverTokenizer
SCREAMING_SNAKE_CASE:Optional[int] = False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
a__ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase__ ( self , **_a ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def lowercase__ ( self , _a , _a=False , _a=20 , _a=5 ):
"""simple docstring"""
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
a__ = []
for i in range(len(_a ) ):
try:
a__ = tokenizer.decode([i] , clean_up_tokenization_spaces=_a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
a__ = list(filter(lambda _a : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _a ) )
a__ = list(filter(lambda _a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_a ) , _a ) )
if max_length is not None and len(_a ) > max_length:
a__ = toks[:max_length]
if min_length is not None and len(_a ) < min_length and len(_a ) > 0:
while len(_a ) < min_length:
a__ = toks + toks
# toks_str = [t[1] for t in toks]
a__ = [t[0] for t in toks]
# Ensure consistency
a__ = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
if " " not in output_txt and len(_a ) > 1:
a__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_a )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_a )
)
if with_prefix_space:
a__ = ' ' + output_txt
a__ = tokenizer.encode(_a , add_special_tokens=_a )
return output_txt, output_ids
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = 'Unicode €.'
a__ = tokenizer(_a )
a__ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _a )
# decoding
a__ = tokenizer.decode(_a )
self.assertEqual(_a , '[CLS]Unicode €.[SEP]' )
a__ = tokenizer('e è é ê ë' )
a__ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _a )
# decoding
a__ = tokenizer.decode(_a )
self.assertEqual(_a , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
a__ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
a__ = tokenizer(_a , padding=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
if FRAMEWORK != "jax":
a__ = list(batch.input_ids.numpy()[0] )
else:
a__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = tokenizer(_a , padding=_a , return_tensors=_a )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _a )
self.assertIn('attention_mask' , _a )
self.assertNotIn('decoder_input_ids' , _a )
self.assertNotIn('decoder_attention_mask' , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = [
'Summary of the text.',
'Another summary.',
]
a__ = tokenizer(
text_target=_a , max_length=32 , padding='max_length' , truncation=_a , return_tensors=_a )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase__ ( self ):
"""simple docstring"""
# safety check on max_len default value so we are sure the test works
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
a__ = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
a__ = tokenizer.__class__.from_pretrained(_a )
a__ = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
shutil.rmtree(_a )
a__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
a__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a__ = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
a__ = tokenizer.__class__.from_pretrained(_a )
a__ = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a__ = tokenizer.__class__.from_pretrained(_a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(_a )
with open(os.path.join(_a , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(_a )
a__ = [F'''<extra_id_{i}>''' for i in range(125 )]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_a , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_a , _a )
with open(os.path.join(_a , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_a , _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ = tokenizer_class.from_pretrained(
_a , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_a )]
a__ = tokenizer_class.from_pretrained(
_a , additional_special_tokens=_a , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
# strings and special added tokens as tokens
a__ = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a__ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
a__ = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(_a , _a )
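# Illustrative sketch of the byte-level scheme the overrides above work around: every
# UTF-8 byte maps to one id (shifted by the special-token offset), so a multi-byte
# character spans several ids and a single id may not decode to valid UTF-8 on its own.
def _example_perceiver_byte_ids():
    tokenizer = PerceiverTokenizer()
    ids = tokenizer.encode("é", add_special_tokens=False)
    assert len(ids) == 2  # "é" is two bytes (0xC3 0xA9) in UTF-8
    return ids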
| 126 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
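# Illustrative effect of the _LazyModule pattern above: `from transformers.models.ibert
# import IBertModel` defers importing the torch-heavy modeling module until the name is
# first accessed, which keeps a bare `import transformers` cheap.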
| 220 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = '▁'
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
__SCREAMING_SNAKE_CASE = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = ['input_ids', 'attention_mask']
_lowercase = []
_lowercase = []
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_ : Tuple =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE_ : str ={} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : Dict =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE_ : Tuple ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ : str =1
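        # e.g. "," has spm id 3, so its fairseq id becomes 3 + 1 = 4, matching the table above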
SCREAMING_SNAKE_CASE_ : Tuple =len(self.sp_model )
SCREAMING_SNAKE_CASE_ : Tuple ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase )
}
SCREAMING_SNAKE_CASE_ : List[str] ={v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE_ : int =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE_ : List[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE_ : Optional[Any] =src_lang if src_lang is not None else 'en_XX'
SCREAMING_SNAKE_CASE_ : Any =self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE_ : List[Any] =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Any =None
return state
def __setstate__( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Any ={}
SCREAMING_SNAKE_CASE_ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , __UpperCAmelCase ):
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : List[Any] =self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self , __UpperCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =[]
SCREAMING_SNAKE_CASE_ : str =''
SCREAMING_SNAKE_CASE_ : List[str] =False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
SCREAMING_SNAKE_CASE_ : Dict =True
SCREAMING_SNAKE_CASE_ : Dict =[]
else:
current_sub_tokens.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE_ : str =os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =[1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ : List[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ : List[str] =src_lang
SCREAMING_SNAKE_CASE_ : Dict =self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =self.convert_tokens_to_ids(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =tgt_lang_id
return inputs
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Dict =src_lang
SCREAMING_SNAKE_CASE_ : List[Any] =tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE_ : List[Any] =[self.cur_lang_code_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[self.eos_token_id]
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE_ : int =[self.cur_lang_code_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[self.eos_token_id]
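# Note the MBART-50 convention set above: the language code is *prefixed* to the sequence
# ([lang_code] ... [eos]); the original MBART instead appends it after [eos].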
| 220 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=False , ) -> str:
"""simple docstring"""
__magic_name__ : List[str] = size if size is not None else {'''height''': 20, '''width''': 20}
__magic_name__ : Any = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__magic_name__ : List[str] = parent
__magic_name__ : Optional[int] = batch_size
__magic_name__ : List[str] = num_channels
__magic_name__ : str = image_size
__magic_name__ : Union[str, Any] = min_resolution
__magic_name__ : Optional[int] = max_resolution
__magic_name__ : List[Any] = do_resize
__magic_name__ : Tuple = size
__magic_name__ : int = do_center_crop
__magic_name__ : Optional[Any] = crop_size
__magic_name__ : Tuple = do_normalize
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Any = do_reduce_labels
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCAmelCase ( ) ->List[Any]:
"""simple docstring"""
__magic_name__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
__magic_name__ : int = Image.open(dataset[0]['''file'''] )
__magic_name__ : Union[str, Any] = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCAmelCase ( ) ->Dict:
"""simple docstring"""
__magic_name__ : Optional[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
__magic_name__ : Any = Image.open(ds[0]['''file'''] )
__magic_name__ : Tuple = Image.open(ds[1]['''file'''] )
__magic_name__ : Optional[Any] = Image.open(ds[2]['''file'''] )
__magic_name__ : Optional[int] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class A__ ( snake_case__ , unittest.TestCase ):
lowerCamelCase__ : List[str] =BeitImageProcessor if is_vision_available() else None
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Tuple = BeitImageProcessingTester(self )
@property
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self ) -> List[str]:
"""simple docstring"""
__magic_name__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) )
def lowercase ( self ) -> Dict:
"""simple docstring"""
__magic_name__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )
__magic_name__ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # reduce_labels remaps background (0) to 255 and shifts the remaining labels down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
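
# Minimal usage sketch of the processor exercised above (the checkpoint id is an
# illustrative assumption; any BEiT checkpoint fine-tuned on ADE20k works):
#
#   from transformers import BeitImageProcessor
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
#   inputs = processor(images=image, segmentation_maps=segmentation_map, return_tensors="pt")
#   inputs["pixel_values"].shape, inputs["labels"].shape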
| 702 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowercase_ = parser.parse_args()
lowercase_ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
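
# Example invocation (a sketch; the script filename is assumed, and the two
# checkpoint ids below are the usual DPR question encoder and BART generator,
# but any compatible pair works):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-sequence-consolidated \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base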
| 336 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
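
# Minimal usage sketch (kept as a comment so the module stays import-safe; the
# checkpoint id is the reference TVLT model and is an assumption here):
#
#   import numpy as np
#   from transformers import TvltProcessor
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   audio = np.random.randn(1, 10_240)          # batch of mono waveforms
#   frames = np.random.rand(1, 8, 3, 224, 224)  # batch of 8-frame RGB clips
#   inputs = processor(images=frames, audio=audio, sampling_rate=44_100, return_tensors="pt")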
| 62 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for the GPT-SW3 models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Removes non-printing characters and normalizes whitespace and unicode before tokenization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string unchanged, overriding the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes a token id or list of token ids back into a string using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds conversation input ids using the chat format `<eos><bos>User: ...<bos>Bot: ...<bos>Bot:`."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
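
# Minimal usage sketch of the fast encode/decode path above (comment form; the
# hub id is one of the checkpoints listed in PRETRAINED_VOCAB_FILES_MAP):
#
#   from transformers import GPTSw3Tokenizer
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok.encode_fast("Träd är fina", return_tensors="pt")  # torch.Tensor of token ids
#   text = tok.decode_fast(ids.tolist())                        # back to a (normalized) string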
| 514 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
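
# Quick sanity check of the property above (a standalone sketch): with the
# default conv strides the model emits one logits frame per 320 input samples.
#
#   import functools, operator
#   strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
#   assert functools.reduce(operator.mul, strides, 1) == 320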
| 572 |
"""Project Euler 29: count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""


def solution(n: int = 100) -> int:
    """
    Returns the number of distinct terms in the sequence a**b.

    >>> solution(5)
    15
    """
    collect_powers = set()
    upper_limit = n + 1  # maximum limit

    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set

    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(input().strip())))
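
# Worked example behind the doctest: for n = 5 the raw powers are
# 2**2..2**5 -> 4, 8, 16, 32;   3**2..3**5 -> 9, 27, 81, 243;
# 4**2..4**5 -> 16, 64, 256, 1024;   5**2..5**5 -> 25, 125, 625, 3125.
# That is 16 values, but 16 appears twice (2**4 and 4**2), leaving 15 distinct terms.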
| 572 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
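
# Sketch of the backbone variant for dense tasks (the checkpoint id mirrors the
# integration test above; out_features selects which stages are returned):
#
#   backbone = ConvNextVaBackbone.from_pretrained(
#       "facebook/convnextv2-tiny-1k-224", out_features=["stage2", "stage3", "stage4"]
#   )
#   outputs = backbone(pixel_values)      # pixel_values: (batch, 3, height, width)
#   feature_maps = outputs.feature_maps   # one (batch, channels, h_i, w_i) tensor per requested stage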
| 28 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
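
# The DiffEdit flow exercised above, in brief (a sketch of the three pipeline
# calls; prompts and strengths mirror the integration tests):
#
#   mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)   # 1. where to edit
#   latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7).latents   # 2. DDIM-invert the image
#   edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]  # 3. masked denoise to target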
| 28 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
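
# Feature-extraction sketch matching the backbone tested above (the checkpoint
# id mirrors the integration test; AutoBackbone resolves to ConvNextBackbone):
#
#   from transformers import AutoBackbone
#   backbone = AutoBackbone.from_pretrained("facebook/convnext-tiny-224", out_features=["stage1", "stage4"])
#   outputs = backbone(pixel_values)  # pixel_values: (batch, 3, height, width)
#   [fm.shape for fm in outputs.feature_maps]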
| 706 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
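# A hedged usage sketch (not part of the module) of how the mappings above are consumed:
# `FlaxAutoModel.from_pretrained` reads the checkpoint's config, looks its model type up
# in FLAX_MODEL_MAPPING, and instantiates the matching Flax class. The checkpoint name
# below is an illustrative assumption.
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel via the "bert" entry

inputs = tokenizer("Hello, world!", return_tensors="jax")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)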
| 442 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : str = 2_00 ):
'''simple docstring'''
_lowerCamelCase : Dict = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
_lowerCamelCase : Union[str, Any] = [0] * (pence + 1)
_lowerCamelCase : List[str] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCAmelCase, pence + 1, 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
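# As a sanity check on the dynamic programme above, the same count can be reproduced by
# a naive recursion for small amounts. This cross-check is an illustrative addition,
# not part of the original solution.
def solution_recursive(pence: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    """Naive combination count: either use the largest coin at least once, or never."""
    if pence == 0:
        return 1
    if pence < 0 or not coins:
        return 0
    return solution_recursive(pence - coins[-1], coins) + solution_recursive(pence, coins[:-1])


if __name__ == "__main__":
    for amount in (5, 10, 20):
        assert solution(amount) == solution_recursive(amount)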
| 83 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
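# A hedged companion sketch (added; not part of the test): the hard-coded IDs above
# should be what the matching tokenizer produces for the French sentence, though the
# exact tokenizer/checkpoint pairing is an assumption.
if __name__ == "__main__":
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    encoded = tokenizer("J'aime le camembert !", return_tensors="tf")
    print(encoded["input_ids"])  # expected to match the tensor hard-coded in the test above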
| 295 | 0 |
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
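# Design note (added): `conv_out` above is zero-initialized (a "zero convolution"), so a
# freshly initialized conditioning embedding adds exactly nothing to the UNet input.
# Training therefore starts from the behavior of the unmodified base model, which is
# the core trick of the ControlNet paper.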
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
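# A minimal initialization sketch (added for illustration; not part of the module).
# The tiny configuration is an assumption chosen so the example is cheap to run;
# real checkpoints use the defaults declared on the class above.
if __name__ == "__main__":
    controlnet = FlaxControlNetModel(
        sample_size=16,
        block_out_channels=(32, 64),
        layers_per_block=1,
        cross_attention_dim=32,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    )
    params = controlnet.init_weights(jax.random.PRNGKey(0))
    # `params` is a FrozenDict pytree that can then be passed to `controlnet.apply(...)`.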
| 315 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
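    # Note (added): the components above are deliberately tiny (32-dim hidden sizes,
    # shallow blocks) so the fast tests run on CPU in seconds; they share no weights
    # with the real damo-vilab checkpoint exercised by the slow tests below.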
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
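# A hedged end-to-end sketch (added; not part of the test file): writing generated
# frames to disk with diffusers' `export_to_video` helper. The fp16/CUDA settings and
# the exact helper behavior are assumptions.
if __name__ == "__main__":
    from diffusers.utils import export_to_video

    pipe = TextToVideoSDPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
    ).to("cuda")
    frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
    video_path = export_to_video(frames)  # writes an .mp4 and returns its path
    print(video_path)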
| 315 | 1 |