| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Hash the whitespace-stripped content of an example."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Return True (and consume the hash) if the example's hash is still in the set of unique hashes."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Scan the first `scan_width` lines for auto-generation markers."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically flag configuration files and unit tests."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: keyword in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: high density of "config"/"test" occurrences
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """True if the file contains none of the keywords for function, class, for loop, or while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """True if the file uses the symbol '=' at most `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Character-to-token ratio of the file under the global `tokenizer`."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Apply the uniqueness check and all heuristic filters."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Gzip-compress a file and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 562 |
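The preprocessing script above deduplicates exactly by hashing whitespace-normalized file contents and keeping only the first occurrence of each hash. Below is a minimal, self-contained sketch of that idea; the toy records and helper name are illustrative, not from the script:

```python
import hashlib
import re

WHITESPACE = re.compile(r"\s+")


def content_hash(text: str) -> str:
    # Hash the content with all whitespace stripped, so trivially
    # reformatted copies collapse to the same key.
    return hashlib.md5(re.sub(WHITESPACE, "", text).encode("utf-8")).hexdigest()


records = [
    {"content": "def f(x):\n    return x"},
    {"content": "def f(x):\n\treturn x"},  # same code, different whitespace
    {"content": "def g(x):\n    return x + 1"},
]

seen: set[str] = set()
unique_records = []
for record in records:
    key = content_hash(record["content"])
    if key not in seen:
        seen.add(key)
        unique_records.append(record)

print(len(unique_records))  # 2: the reformatted duplicate is dropped
```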
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (even though we don't have a training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not being used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted `__init__` arguments of `config_class` that are never used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 562 | 1 |
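The checker above hinges on two primitives: reading a class's `__init__` parameters with `inspect.signature` and grepping modeling source strings for `config.<attr>` usages. A minimal sketch of the same mechanics on a toy class (class and attribute names are illustrative):

```python
import inspect


class ToyConfig:
    def __init__(self, hidden_size=32, unused_knob=7, **kwargs):
        self.hidden_size = hidden_size
        self.unused_knob = unused_knob


modeling_source = "x = config.hidden_size * 2\n"

params = dict(inspect.signature(ToyConfig.__init__).parameters)
names = [p for p in params if p not in ("self", "kwargs")]
unused = [p for p in names if f"config.{p}" not in modeling_source]
print(unused)  # ['unused_knob']
```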
def is_unique(input_str: str) -> bool:
    """Return True if every character in `input_str` appears at most once,
    using a single big integer as a bitmap over Unicode code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 703 |
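The bitmap trick above works because Python integers are arbitrary precision: bit `ord(ch)` of one big int records whether character `ch` was seen, so no auxiliary set is needed. A quick usage check (assuming the function name `is_unique` as restored above), plus the set-based equivalent for comparison:

```python
assert is_unique("abcdef") is True
assert is_unique("abcdea") is False


# the same predicate with a set, for reference
def is_unique_set(s: str) -> bool:
    return len(set(s)) == len(s)


assert is_unique_set("abcdef") and not is_unique_set("abcdea")
```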
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward audio to the feature extractor and text to the tokenizer; when both are given,
        the tokenized text is attached to the audio features as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad audio features via the feature extractor and labels via the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 563 | 0 |
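For context, the processor above is used by passing audio to the feature extractor and a transcript to the tokenizer in a single call. A hedged usage sketch: the checkpoint name is a common public one and is an assumption here, and running this downloads weights:

```python
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

speech = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, text="HELLO WORLD", return_tensors="pt")
print(inputs["input_values"].shape)  # audio features for the model
print(inputs["labels"])              # tokenized transcript, attached per __call__ above
```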
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 282 |
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain simulation."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from `node` according to the stored transition probabilities."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 282 | 1 |
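A short usage example for the Markov chain above, with a hypothetical two-node transition table; counts will vary run to run since `transition` samples with `random()`:

```python
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]

# roughly 90% of steps starting from "a" should stay on "a"
visited = get_transitions("a", transitions, steps=1000)
print(visited.most_common())
```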
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure from the ideal gas law, P = nRT / V, with R = 0.0821 L.atm/(mol.K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume from the ideal gas law, V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Temperature from the ideal gas law, T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 344 |
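A worked check of the ideal-gas helpers above, with R ≈ 0.0821 L·atm/(mol·K): 1 mol at 300 K in 24.63 L gives P = (1 × 0.0821 × 300) / 24.63 ≈ 1 atm, which the function rounds to 1. Keyword arguments are used so the checks hold regardless of parameter order:

```python
assert moles_to_pressure(volume=24.63, moles=1, temperature=300) == 1
assert moles_to_volume(pressure=1, moles=1, temperature=300) == 25  # round(24.63) = 25
assert pressure_and_volume_to_temperature(pressure=1, moles=1, volume=24.63) == 300
assert molarity_to_normality(nfactor=2, moles=4, volume=2) == 4     # (4/2 mol/L) * 2
```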
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 344 | 1 |
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) on [x0, x_end] with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
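A quick sanity check of the explicit Euler solver above on dy/dx = y with y(0) = 1, whose exact solution is e^x; with a small step the endpoint should be close to e ≈ 2.718:

```python
import numpy as np

y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.001, x_end=1.0)
print(y[-1])         # ~2.7169, a first-order approximation of e
print(np.e - y[-1])  # small positive truncation error
```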
| 635 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 635 | 1 |
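A hedged usage sketch for the tokenizer above. The checkpoint name comes from the vocab map in the file itself; running this downloads the SentencePiece model, and decoded text may differ slightly from the input around special tokens:

```python
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer("Crime and Punishment")["input_ids"]
print(ids)
print(tokenizer.decode(ids))  # back to text via SentencePiece
```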
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 46 |
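The expected `seq_length` in the tester above is pure arithmetic over the patch grid. A quick check with the tester's defaults (patch_size=2, max_length=24, num_mel_bins=16, strides of 2):

```python
patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride = time_stride = 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1             # 12
num_patches = frequency_out * time_out                              # 96
seq_length = num_patches + 2                                        # 98: +2 for [CLS] and distillation tokens
print(frequency_out, time_out, num_patches, seq_length)
```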
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 46 | 1 |
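The multiple-choice check above relies on broadcasting each (batch, seq) tensor to (batch, num_choices, seq) via `unsqueeze(1).expand(...)`. A standalone shape check of that pattern with toy sizes and plain torch:

```python
import torch

batch_size, seq_length, num_choices = 2, 8, 4
input_ids = torch.randint(0, 99, (batch_size, seq_length))

expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(expanded.shape)  # torch.Size([2, 4, 8])
assert torch.equal(expanded[:, 0], expanded[:, 1])  # every choice slot starts as a copy
```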
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 405 |
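Quick checks for the two palindrome predicates above; both normalize by dropping spaces and lowercasing, then count characters with odd frequency:

```python
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True  # "momo": m:2, o:2
assert can_string_be_rearranged_as_palindrome("aab") is True           # one odd count ("b")
assert can_string_be_rearranged_as_palindrome("abc") is False          # three odd counts
```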
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class A__ :
def __init__( self : List[str] , _a : Dict , _a : Dict=13 , _a : Union[str, Any]=7 , _a : Dict=True , _a : Any=True , _a : Optional[int]=True , _a : List[Any]=True , _a : str=99 , _a : Union[str, Any]=32 , _a : List[Any]=2 , _a : Union[str, Any]=4 , _a : Dict=37 , _a : List[str]="gelu" , _a : Tuple=0.1 , _a : Optional[Any]=0.1 , _a : List[str]=512 , _a : Optional[Any]=16 , _a : List[Any]=2 , _a : int=0.02 , _a : Optional[Any]=3 , _a : Optional[Any]=4 , _a : Optional[Any]=None , ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =13
_SCREAMING_SNAKE_CASE =7
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =99
_SCREAMING_SNAKE_CASE =32
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =37
_SCREAMING_SNAKE_CASE ='gelu'
_SCREAMING_SNAKE_CASE =0.1
_SCREAMING_SNAKE_CASE =0.1
_SCREAMING_SNAKE_CASE =512
_SCREAMING_SNAKE_CASE =16
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =0.02
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =None
def A ( self : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] , _a : Dict , _a : Any , _a : Union[str, Any] , _a : Union[str, Any] , _a : List[str] , _a : str , _a : Tuple ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModel(config=_a )
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_SCREAMING_SNAKE_CASE =[input_ids, input_mask]
_SCREAMING_SNAKE_CASE =model(_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , _a : Optional[int] , _a : Tuple , _a : Any , _a : List[str] , _a : int , _a : Dict , _a : str ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =TFRoFormerForCausalLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : List[str] , _a : List[Any] , _a : Any , _a : List[Any] , _a : Any , _a : List[Any] , _a : Any , _a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForMaskedLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[str] , _a : List[Any] , _a : Optional[Any] , _a : Union[str, Any] , _a : Dict , _a : List[Any] , _a : str , _a : Union[str, Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFRoFormerForSequenceClassification(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , _a : int , _a : str , _a : Any , _a : Tuple , _a : int , _a : List[str] , _a : Optional[int] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =TFRoFormerForMultipleChoice(config=_a )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 are the batch, head, sequence and head-size dims; values are scaled down by 100
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        expected_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
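# The expected tensors above follow the rotary position embedding rule the test
# exercises: q' = q * cos + rotate_every_two(q) * sin, with each (sin, cos) entry
# shared by a pair of adjacent channels. A minimal NumPy sketch of that rule (my
# own illustration, independent of the TF implementation under test):
def _rotate_every_two_sketch(x):
    import numpy as np

    # (x1, x2) pairs at channels (2i, 2i+1) become (-x2, x1)
    x1, x2 = x[..., 0::2], x[..., 1::2]
    return np.stack((-x2, x1), axis=-1).reshape(x.shape)


def _apply_rotary_sketch(x, sin, cos):
    import numpy as np

    # repeat so each (sin, cos) entry covers two channels, then rotate
    sin2, cos2 = np.repeat(sin, 2, axis=-1), np.repeat(cos, 2, axis=-1)
    return x * cos2 + _rotate_every_two_sketch(x) * sin2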
| 405 | 1 |
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of x and y, plus one such subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk the table backwards to reconstruct one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    assert ln == expected_ln and subseq == expected_subseq
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 494 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
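# A minimal usage sketch (the checkpoint name is a real DPR reader model on the
# Hub; the question/passage strings are my own illustration, not part of this
# module): encode one question against several passages, run the reader, then
# decode the best answer spans with the helper above.
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway", "What Is Love"],
#       texts=["'What Is Love' is a song recorded by Haddaway.", "It was released in May 1993."],
#       padding=True,
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=3)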
| 494 | 1 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the Gamma function for positive integers and half-integers,
    using the recurrence gamma(n) = (n - 1) * gamma(n - 1) and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 1 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
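# A minimal usage sketch (the test values are my own illustration): a negative
# factor can turn the running minimum into the new maximum, which is why both
# extremes are tracked above.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([6, -3, -10, 0, 2]) == 180  # best subarray is [6, -3, -10]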
| 485 | 0 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the JSON config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
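# Example invocation (the script name and file paths are illustrative
# placeholders, not artifacts shipped with this module):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_model.ckpt \
#       --big_bird_config_file big_bird_config.json \
#       --pytorch_dump_path ./pytorch_dump \
#       --is_trivia_qa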
| 320 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
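# A minimal usage sketch (the checkpoint names are real public ControlNets, but
# any set of compatible ControlNets works): the wrapper runs each net on its own
# conditioning image and scale, then sums the residuals before handing them to
# the UNet.
#
#   controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([controlnet_canny, controlnet_pose])
#   down_res, mid_res = multi(
#       sample, timestep, encoder_hidden_states,
#       controlnet_cond=[canny_image, pose_image],
#       conditioning_scale=[1.0, 0.8],
#   )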
| 320 | 1 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number in `data_file` whose `base,exponent` pair
    yields the largest base**exponent (Project Euler problem 99).

    Since log10 is monotonically increasing, comparing x * log10(a) is equivalent
    to comparing a**x while keeping every intermediate value small.
    """
    largest: float = 0
    result = 0

    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
if __name__ == "__main__":
print(solution())
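# Worked example of the logarithm trick (my own numbers, not from the data file):
# to compare 2**11 with 3**7 without evaluating either power, compare
# 11 * log10(2) ~ 3.311 with 7 * log10(3) ~ 3.340. The larger logarithm wins,
# and indeed 3**7 = 2187 > 2**11 = 2048.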
| 285 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Return the default level, overridable with the DATASETS_VERBOSITY environment variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
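# A minimal usage sketch (the import path is an assumption; in the real package
# these helpers live under `datasets.utils.logging`):
#
#   from datasets.utils import logging as ds_logging
#
#   logger = ds_logging.get_logger(__name__)
#   ds_logging.set_verbosity_info()      # or: export DATASETS_VERBOSITY=info
#   logger.info("now visible at INFO level")
#   ds_logging.disable_progress_bar()    # swaps tqdm for the EmptyTqdm no-op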
| 285 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
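# A minimal usage sketch (the file name is illustrative; registration is needed
# because these protocols are not built into fsspec). The chained URL syntax is
# standard fsspec: the part after `::` locates the compressed file, the prefix
# names the compression protocol implemented above.
#
#   import fsspec
#
#   fsspec.register_implementation("gzip", GzipFileSystem, clobber=True)
#   with fsspec.open("gzip://data.txt::/tmp/data.txt.gz", "rb") as f:
#       payload = f.read()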
| 713 |
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
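# Cross-check against the closed forms (standard identities, not part of the
# original module): the sum is n(n+1)/2 and the sum of squares is n(n+1)(2n+1)/6,
# so for n = 10 the difference is 55**2 - 385 = 3025 - 385 = 2640.
def _solution_closed_form(n: int = 100) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6


assert _solution_closed_form(10) == 2640 == solution(10)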
| 181 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
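# A minimal usage sketch (the override value is my own illustration; all other
# fields fall back to the defaults defined above): build a config with a
# customised vision tower and round-trip it through `to_dict`.
if __name__ == "__main__":
    config = GitConfig(vision_config={"patch_size": 32})
    assert config.to_dict()["vision_config"]["patch_size"] == 32
    assert config.to_dict()["model_type"] == "git"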
| 273 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a : str = 'pt'
elif is_tf_available():
a : Dict = 'tf'
else:
a : Optional[Any] = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect (id, token) pairs whose decoded form is clean ASCII and round-trips through encode
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
a__ = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
a__ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if FRAMEWORK != "jax":
a__ = list(batch.input_ids.numpy()[0] )
else:
a__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __SCREAMING_SNAKE_CASE )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_input_ids' , __SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_attention_mask' , __SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = [
'Summary of the text.',
'Another summary.',
]
a__ = tokenizer(
text_target=__SCREAMING_SNAKE_CASE , max_length=3_2 , padding='max_length' , truncation=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
a__ = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
a__ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
a__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
a__ = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
a__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(__SCREAMING_SNAKE_CASE )
a__ = [f'<extra_id_{i}>' for i in range(1_2_5 )]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ = tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__SCREAMING_SNAKE_CASE )]
a__ = tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a__ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
a__ = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 273 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
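# The acceptance rule above is the Metropolis criterion: a worsening move with
# score change dE < 0 is still taken with probability e^(dE / T). As a quick
# worked example (my own numbers, not from the module): with dE = -2, a hot
# system at T = 100 accepts the move with probability e^(-0.02) ~ 0.98, while a
# cool one at T = 1 accepts it only with probability e^(-2) ~ 0.135, so the
# cooling schedule gradually turns the random walk into pure hill climbing.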
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 701 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
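# Behaviour sketch for the schedule above (illustrative numbers): with
# initial_learning_rate=1e-3, warmup_steps=100 and power=1.0, step 50 yields
# 1e-3 * (50 / 100) ** 1.0 == 5e-4; from step 100 onwards the wrapped
# decay_schedule_fn takes over, evaluated at (step - warmup_steps).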
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning-rate schedule: linear warmup followed by polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
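# Example usage (a sketch; the hyperparameters below are made up for illustration,
# and `model` is any compiled tf.keras.Model defined elsewhere):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=500,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer)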
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, optionally restricted to parameters matched by regex lists."""
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp as a custom object."""
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to the parameter named `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over multiple steps; call `reset()` after applying them."""
    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f'Expected {len(self._gradients)} gradients, but got {len(gradients)}')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
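# Minimal usage sketch (assumes `model`, `loss_fn`, `optimizer` and `batches`
# exist; the accumulation factor of 4 is arbitrary): gradients are summed over
# several micro-batches, then applied once and reset.
#
#     accumulator = GradientAccumulator()
#     for micro_batch in batches:
#         with tf.GradientTape() as tape:
#             loss = loss_fn(model(micro_batch))
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if accumulator.step % 4 == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()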
| 70 | 0 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the Trax model pickle file."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained Reformer model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
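# Example invocation (the script filename follows the transformers repository
# convention; all paths are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/model.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin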
| 88 |
"""Whisper model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Whisper model."""
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ]
        )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1E-3
| 460 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 416 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
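# Worked examples for the helpers above (0b1101 == 13):
#     set_bit(0b1101, 1)     # -> 15 (0b1111)
#     clear_bit(0b1101, 2)   # -> 9  (0b1001)
#     flip_bit(0b1101, 1)    # -> 15 (0b1111)
#     is_bit_set(0b1101, 0)  # -> True
#     get_bit(0b1101, 1)     # -> 0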
| 416 | 1 |
UpperCamelCase__ : List[str] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> bytes:
"""simple docstring"""
if not isinstance(snake_case_, snake_case_ ):
a = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(snake_case_ )
a = ''''''.join(bin(snake_case_ )[2:].zfill(8 ) for byte in data )
a = len(snake_case_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
a = b'''=''' * ((6 - len(snake_case_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(snake_case_ ) % 6)
else:
a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(snake_case_ ), 6 ) ).encode()
+ padding
)
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> bytes:
"""simple docstring"""
if not isinstance(snake_case_, snake_case_ ) and not isinstance(snake_case_, snake_case_ ):
a = (
'''argument should be a bytes-like object or ASCII string, '''
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(snake_case_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(snake_case_, snake_case_ ):
try:
a = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
a = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(snake_case_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
a = encoded_data[:-padding]
a = ''''''.join(
bin(B64_CHARSET.index(snake_case_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
a = ''''''.join(
bin(B64_CHARSET.index(snake_case_ ) )[2:].zfill(6 ) for char in encoded_data )
a = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(snake_case_ ), 8 )
]
return bytes(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
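# Round-trip examples for the functions above:
#     base64_encode(b"Algorithms")          # -> b'QWxnb3JpdGhtcw=='
#     base64_decode(b"QWxnb3JpdGhtcw==")    # -> b'Algorithms'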
| 387 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` (lowest degree first) at `x`."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one pass, no explicit powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
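# Horner's rule rewrites sum(c_i * x**i) as nested multiply-adds; for the
# coefficients above (lowest degree first):
#     5.0*x**2 + 9.3*x**3 + 7.0*x**4 == (((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0
# Both functions therefore print 79800.0 for x = 10.0.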
| 387 | 1 |
"""TAPAS configuration"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"
def __init__( self : Any , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=1_024 , UpperCAmelCase_ : Dict=[3, 256, 256, 2, 256, 256, 10] , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=10.0 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]="ratio" , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Dict , ):
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCamelCase__ : List[str] = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : str = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : str = hidden_act
UpperCamelCase__ : Optional[int] = intermediate_size
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = max_position_embeddings
UpperCamelCase__ : Tuple = type_vocab_sizes
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : Optional[int] = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCamelCase__ : Optional[int] = positive_label_weight
UpperCamelCase__ : str = num_aggregation_labels
UpperCamelCase__ : Union[str, Any] = aggregation_loss_weight
UpperCamelCase__ : List[str] = use_answer_as_supervision
UpperCamelCase__ : List[str] = answer_loss_importance
UpperCamelCase__ : Tuple = use_normalized_answer_loss
UpperCamelCase__ : Optional[int] = huber_loss_delta
UpperCamelCase__ : Any = temperature
UpperCamelCase__ : int = aggregation_temperature
UpperCamelCase__ : str = use_gumbel_for_cells
UpperCamelCase__ : Dict = use_gumbel_for_aggregation
UpperCamelCase__ : List[Any] = average_approximation_function
UpperCamelCase__ : Dict = cell_selection_preference
UpperCamelCase__ : Any = answer_loss_cutoff
UpperCamelCase__ : str = max_num_rows
UpperCamelCase__ : Optional[Any] = max_num_columns
UpperCamelCase__ : Tuple = average_logits_per_cell
UpperCamelCase__ : Optional[int] = select_one_column
UpperCamelCase__ : Union[str, Any] = allow_empty_column_selection
UpperCamelCase__ : Tuple = init_cell_selection_weights_to_zero
UpperCamelCase__ : Dict = reset_position_index_per_cell
UpperCamelCase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
UpperCamelCase__ : Dict = aggregation_labels
UpperCamelCase__ : Optional[int] = no_aggregation_label_index
if isinstance(self.aggregation_labels , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = {int(UpperCAmelCase_): v for k, v in aggregation_labels.items()}
| 6 |
"""Tests for the CLIP processor."""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 1 |
'''Solve the N-Queens problem with backtracking, printing every placement.'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no previously placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, printing each complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 158 |
'''Image processor class for DPT.'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT-style image processor."""
    model_input_names = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = True , lowerCamelCase = 1 / 2_55 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase )
UpperCamelCase : Dict = size if size is not None else {"height": 3_84, "width": 3_84}
UpperCamelCase : int = get_size_dict(lowerCamelCase )
UpperCamelCase : str = do_resize
UpperCamelCase : int = size
UpperCamelCase : str = keep_aspect_ratio
UpperCamelCase : int = ensure_multiple_of
UpperCamelCase : Optional[int] = resample
UpperCamelCase : Optional[int] = do_rescale
UpperCamelCase : Any = rescale_factor
UpperCamelCase : Tuple = do_normalize
UpperCamelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase : List[Any] = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase : Optional[Any] = get_resize_output_image_size(
lowerCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=lowerCamelCase , multiple=lowerCamelCase , )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) -> Any:
'''simple docstring'''
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : Optional[int] = size if size is not None else self.size
UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase )
UpperCamelCase : Tuple = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase : str = resample if resample is not None else self.resample
UpperCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : List[str] = image_std if image_std is not None else self.image_std
UpperCamelCase : Optional[int] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase : Dict = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase : Optional[Any] = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase : Union[str, Any] = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase : Dict = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
UpperCamelCase : List[str] = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
UpperCamelCase : Dict = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Dict:
'''simple docstring'''
UpperCamelCase : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCamelCase ):
UpperCamelCase : Dict = target_sizes.numpy()
UpperCamelCase : List[Any] = []
for idx in range(len(lowerCamelCase ) ):
UpperCamelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCamelCase )
UpperCamelCase : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase )
else:
UpperCamelCase : Any = logits.argmax(dim=1 )
UpperCamelCase : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 173 | 0 |
"""simple docstring"""
import numpy as np
def _A (__a ) -> np.array:
"""simple docstring"""
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
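# Worked example (values rounded to 4 decimals):
#     tangent_hyperbolic(np.array([1.0, 5.0, -0.67]))
#     # -> approximately [0.7616, 0.9999, -0.5850]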
| 176 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : int = NllbTokenizer(lowercase_ , keep_accents=lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = NllbTokenizer(lowercase_ , keep_accents=lowercase_)
SCREAMING_SNAKE_CASE_ : int = tokenizer.tokenize('''This is a test''')
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_p.save_pretrained(lowercase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
SCREAMING_SNAKE_CASE_ : Tuple = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(lowercase_ , lowercase_)
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_))
shutil.rmtree(lowercase_)
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_p.save_pretrained(lowercase_)
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_)
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_))
shutil.rmtree(lowercase_)
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.save_pretrained(lowercase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_r.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_))
shutil.rmtree(lowercase_)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if not self.test_seqaseq:
return
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Longer text that will definitely require truncation.
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
SCREAMING_SNAKE_CASE_ : int = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_ , tgt_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 10)
# max_target_length will default to max_length if not specified
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
lowercase_ , tgt_texts=lowercase_ , max_length=3 , return_tensors='''pt''')
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 3)
SCREAMING_SNAKE_CASE_ : int = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''')
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3)
self.assertNotIn('''decoder_input_ids''' , lowercase_)
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''')
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
SCREAMING_SNAKE_CASE_ : Optional[int] = [AddedToken('''<special>''' , lstrip=lowercase_)]
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.encode('''Hey this is a <special> token''')
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.encode('''<special>''' , add_special_tokens=lowercase_)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
SCREAMING_SNAKE_CASE_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_p.encode('''Hey this is a <special> token''')
SCREAMING_SNAKE_CASE_ : Any = tokenizer_cr.encode('''Hey this is a <special> token''')
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    """Integration tests for the distilled NLLB tokenizer (eng_Latn -> ron_Latn)."""
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''')
        cls.pad_token_id: int = 1
return cls
    def test_language_codes(self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , str)
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True).input_ids[0]
        self.assertEqual(ids[-1] , 2)
        self.assertEqual(ids[0] , EN_CODE)
        self.assertEqual(len(ids) , desired_max_length)
    def test_mask_token(self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) , [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''])
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 15) , batch.input_ids.shape)
        self.assertEqual((2, 15) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result)
        self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0]) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''')
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''')
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(
            labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
        self.assertEqual(
            nested_simplify(inputs) , {
                # eng_Latn, A, test, EOS
                '''input_ids''': [[256047, 70, 7356, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # fra_Latn
                '''forced_bos_token_id''': 256057,
            } , )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
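# Illustrative sketch (not part of the test file above): the source-side framing
# the legacy-behaviour test asserts. NLLB places the language code first and EOS
# last by default, while the legacy behaviour appends the code after EOS. The
# helper name `frame_source` is hypothetical; the ids are the ones used in the tests.
def frame_source(token_ids, lang_code_id, eos_id=2, legacy=False):
    if legacy:
        return token_ids + [eos_id, lang_code_id]
    return [lang_code_id] + token_ids + [eos_id]
assert frame_source([16297], 256047) == [256047, 16297, 2]
assert frame_source([16297], 256047, legacy=True) == [16297, 2, 256047]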
| 176 | 1 |
"""simple docstring"""
def A_ ( lowercase ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = generate_pascal_triangle(lowercase )
for row_idx in range(lowercase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def A_ ( lowercase ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
UpperCAmelCase_ : list[list[int]] = []
for current_row_idx in range(lowercase ):
UpperCAmelCase_ : Optional[Any] = populate_current_row(lowercase , lowercase )
triangle.append(lowercase )
return triangle
def A_ ( lowercase , lowercase ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCAmelCase_ ,UpperCAmelCase_ : List[Any] = 1, 1
for current_col_idx in range(1 , lowercase ):
calculate_current_element(
lowercase , lowercase , lowercase , lowercase )
return current_row
def A_ ( lowercase , lowercase , lowercase , lowercase , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : str = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCAmelCase_ : int = triangle[current_row_idx - 1][current_col_idx]
UpperCAmelCase_ : Any = above_to_left_elt + above_to_right_elt
def A_ ( lowercase ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
UpperCAmelCase_ : list[list[int]] = [[1]]
for row_index in range(1 , lowercase ):
UpperCAmelCase_ : Any = [0] + result[-1] + [0]
UpperCAmelCase_ : Union[str, Any] = row_index + 1
# Calculate the number of distinct elements in a row
UpperCAmelCase_ : Dict = sum(divmod(lowercase , 2 ) )
UpperCAmelCase_ : List[str] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCAmelCase_ : Union[str, Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCAmelCase_ : int = row_first_half + row_second_half
result.append(lowercase )
return result
def A_ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase , lowercase ) -> None:
UpperCAmelCase_ : int = f'''{func.__name__}({value})'''
UpperCAmelCase_ : int = timeit(f'''__main__.{call}''' , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase , lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
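# Usage sketch (illustrative, not part of the original module): both
# implementations agree on the classic opening rows.
if __name__ == "__main__":
    assert generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]
    assert generate_pascal_triangle_optimized(3) == [[1], [1, 1], [1, 2, 1]]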
| 470 |
"""simple docstring"""
def A_ ( lowercase ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCAmelCase_ : Union[str, Any] = """"""
UpperCAmelCase_ : List[Any] = """"""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(lowercase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
UpperCAmelCase_ ,UpperCAmelCase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCAmelCase_ : Dict = [1 for i in range(len(lowercase ) )]
# for each character in new_string find corresponding palindromic string
UpperCAmelCase_ : Optional[Any] = 0
for j in range(len(lowercase ) ):
UpperCAmelCase_ : str = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(lowercase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCAmelCase_ : Union[str, Any] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
UpperCAmelCase_ : List[Any] = j - k + 1 # noqa: E741
UpperCAmelCase_ : Union[str, Any] = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCAmelCase_ : Optional[Any] = length[j]
UpperCAmelCase_ : int = j
# create that string
UpperCAmelCase_ : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
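# Usage sketch (illustrative, not part of the original module): Manacher's
# algorithm returns the longest palindromic substring in O(n).
if __name__ == "__main__":
    assert palindromic_string("abbbaba") == "abbba"
    assert palindromic_string("ababa") == "ababa"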
| 470 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__SCREAMING_SNAKE_CASE: int = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_lowerCAmelCase , num_labels=_lowerCAmelCase , mode=self.mode , **_lowerCAmelCase )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE: Optional[int] = Path(self.output_dir ) / '''metrics.json'''
__SCREAMING_SNAKE_CASE: List[Any] = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
__SCREAMING_SNAKE_CASE: Dict = 0
__SCREAMING_SNAKE_CASE: Optional[int] = defaultdict(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.config.model_type
__SCREAMING_SNAKE_CASE: int = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
__SCREAMING_SNAKE_CASE: dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__SCREAMING_SNAKE_CASE: Tuple = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
__SCREAMING_SNAKE_CASE: Tuple = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__SCREAMING_SNAKE_CASE: str = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__SCREAMING_SNAKE_CASE: Dict = get_git_info()['''repo_sha''']
__SCREAMING_SNAKE_CASE: List[str] = hparams.num_workers
__SCREAMING_SNAKE_CASE: Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: List[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__SCREAMING_SNAKE_CASE: Any = self.decoder_start_token_id
__SCREAMING_SNAKE_CASE: Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
__SCREAMING_SNAKE_CASE: Tuple = False
__SCREAMING_SNAKE_CASE: Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__SCREAMING_SNAKE_CASE: Optional[Any] = self.hparams.eval_max_gen_length
else:
__SCREAMING_SNAKE_CASE: str = self.model.config.max_length
__SCREAMING_SNAKE_CASE: List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_lowerCAmelCase , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
__SCREAMING_SNAKE_CASE: int = True
return readable_batch
def snake_case_ ( self , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
return self.model(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = self.tokenizer.batch_decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
return lmap(str.strip , _lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.tokenizer.pad_token_id
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: str = batch['''input_ids'''], batch['''attention_mask''']
__SCREAMING_SNAKE_CASE: Optional[Any] = batch['''labels''']
if isinstance(self.model , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Tuple = self.model._shift_right(_lowerCAmelCase )
else:
__SCREAMING_SNAKE_CASE: Tuple = shift_tokens_right(_lowerCAmelCase , _lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__SCREAMING_SNAKE_CASE: Any = decoder_input_ids
self.save_readable_batch(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = self(_lowerCAmelCase , attention_mask=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , use_cache=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__SCREAMING_SNAKE_CASE: List[Any] = nn.CrossEntropyLoss(ignore_index=_lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
__SCREAMING_SNAKE_CASE: Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE: Any = nn.functional.log_softmax(_lowerCAmelCase , dim=-1 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = label_smoothed_nll_loss(
_lowerCAmelCase , _lowerCAmelCase , self.hparams.label_smoothing , ignore_index=_lowerCAmelCase )
return (loss,)
@property
def snake_case_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self._step(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = dict(zip(self.loss_names , _lowerCAmelCase ) )
# tokens per batch
__SCREAMING_SNAKE_CASE: List[Any] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
__SCREAMING_SNAKE_CASE: int = batch['''input_ids'''].shape[0]
__SCREAMING_SNAKE_CASE: Optional[Any] = batch['''input_ids'''].eq(self.pad ).sum()
__SCREAMING_SNAKE_CASE: Optional[Any] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
return self._generative_step(_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase="val" ):
"""simple docstring"""
self.step_count += 1
__SCREAMING_SNAKE_CASE: Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__SCREAMING_SNAKE_CASE: int = losses['''loss''']
__SCREAMING_SNAKE_CASE: List[Any] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
__SCREAMING_SNAKE_CASE: str = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__SCREAMING_SNAKE_CASE: torch.FloatTensor = torch.tensor(_lowerCAmelCase ).type_as(_lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
__SCREAMING_SNAKE_CASE: Tuple = self.step_count
self.metrics[prefix].append(_lowerCAmelCase ) # callback writes this to self.metrics_save_path
__SCREAMING_SNAKE_CASE: str = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
return calculate_rouge(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__SCREAMING_SNAKE_CASE: List[str] = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__SCREAMING_SNAKE_CASE: List[Any] = (time.time() - ta) / batch['''input_ids'''].shape[0]
__SCREAMING_SNAKE_CASE: List[str] = self.ids_to_clean_text(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = self.ids_to_clean_text(batch['''labels'''] )
__SCREAMING_SNAKE_CASE: str = self._step(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = dict(zip(self.loss_names , _lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Dict = self.calc_generative_metrics(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: str = np.mean(lmap(_lowerCAmelCase , _lowerCAmelCase ) )
base_metrics.update(gen_time=_lowerCAmelCase , gen_len=_lowerCAmelCase , preds=_lowerCAmelCase , target=_lowerCAmelCase , **_lowerCAmelCase )
return base_metrics
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
return self._generative_step(_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
return self.validation_epoch_end(_lowerCAmelCase , prefix='''test''' )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = self.n_obs[type_path]
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.target_lens[type_path]
__SCREAMING_SNAKE_CASE: Any = self.dataset_class(
self.tokenizer , type_path=_lowerCAmelCase , n_obs=_lowerCAmelCase , max_target_length=_lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.get_dataset(_lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE: Union[str, Any] = dataset.make_sortish_sampler(_lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCAmelCase , batch_size=_lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCAmelCase , num_workers=self.num_workers , sampler=_lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE: Tuple = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCAmelCase , batch_sampler=_lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowerCAmelCase , batch_size=_lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCAmelCase , num_workers=self.num_workers , sampler=_lowerCAmelCase , )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_lowerCAmelCase )
return dataloader
def snake_case_ ( self ):
"""simple docstring"""
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def snake_case_ ( self ):
"""simple docstring"""
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add model-specific CLI arguments on top of the generic ones."""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
        parser.add_argument(
            '''--max_source_length''' , default=1024 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--max_target_length''' , default=56 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=142 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=142 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
        parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
        parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=False )
        parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=False )
        parser.add_argument('''--max_tokens_per_batch''' , type=int , default=None )
        parser.add_argument('''--logger_name''' , type=str , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
        parser.add_argument('''--n_train''' , type=int , default=-1 , required=False , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_val''' , type=int , default=500 , required=False , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_test''' , type=int , default=-1 , required=False , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=str , default='''summarization''' , required=False , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--label_smoothing''' , type=float , default=0.0 , required=False )
        parser.add_argument('''--src_lang''' , type=str , default='''''' , required=False )
        parser.add_argument('''--tgt_lang''' , type=str , default='''''' , required=False )
        parser.add_argument('''--eval_beams''' , type=int , default=None , required=False )
        parser.add_argument(
            '''--val_metric''' , type=str , default=None , required=False , choices=['''bleu''', '''rouge2''', '''loss''', None] )
        parser.add_argument('''--eval_max_gen_length''' , type=int , default=None , help='''never generate more than n tokens''' )
        parser.add_argument('''--save_top_k''' , type=int , default=1 , required=False , help='''How many checkpoints to save''' )
        parser.add_argument(
            '''--early_stopping_patience''' , type=int , default=-1 , required=False , help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will affect it.'''
            ) , )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
    def __init__(self, hparams, **kwargs):
        super().__init__(hparams , **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds , target)
def main(args, model=None) -> SummarizationModule:
    """Build the module, train it, and optionally test the best checkpoint."""
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
        logger = True  # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get('''WANDB_PROJECT''' , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer: pl.Trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
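# Minimal sketch (illustrative): the fairseq-style label-smoothed NLL loss that the
# training step switches to when --label_smoothing > 0. The helper name
# `smoothed_nll` is hypothetical and is not the repo's `label_smoothed_nll_loss`.
import torch
def smoothed_nll(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold class indices
    nll = -lprobs.gather(dim=-1, index=target.clamp(min=0).unsqueeze(-1))
    smooth = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.unsqueeze(-1).eq(ignore_index)
    nll = nll.masked_fill(pad_mask, 0.0).sum()
    smooth = smooth.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll + eps_i * smooth
if __name__ == "__main__":
    lp = torch.log_softmax(torch.randn(4, 10), dim=-1)
    tgt = torch.tensor([1, 2, 3, -100])
    print(smoothed_nll(lp, tgt, epsilon=0.1))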
| 146 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
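# Minimal sketch (illustrative, standalone): the lazy-import idea behind
# `_LazyModule`, expressed with PEP 562 module-level __getattr__. The `_LAZY`
# mapping and this fallback are hypothetical, not the transformers implementation;
# attribute access only succeeds when the mapped submodule actually exists.
import importlib
_LAZY = {"GPTNeoConfig": ".configuration_gpt_neo"}
def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")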
| 146 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    r"""Configuration for the Funnel Transformer model."""
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(self, vocab_size=30_522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3_072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1E-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')
    @property
    def num_blocks(self):
        return len(self.block_sizes)
    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
| 31 |
"""simple docstring"""
import math
def _lowerCAmelCase ( lowerCAmelCase = 100 ):
'''simple docstring'''
UpperCAmelCase = sum(i * i for i in range(1 , n + 1 ) )
UpperCAmelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
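# Quick check (illustrative): for n = 10 the difference is 3025 - 385 = 2640.
if __name__ == "__main__":
    assert solution(10) == 2640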
| 673 | 0 |
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by exponentiation by squaring."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    """Compute a**b, handling negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
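# Usage sketch (illustrative): exponentiation by squaring halves the exponent on
# each recursive call, so actual_power has O(log b) recursion depth.
if __name__ == "__main__":
    assert actual_power(2, 10) == 1024
    assert power(2, -3) == 0.125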
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    r"""Configuration for the PoolFormer model."""
    model_type = "poolformer"
    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1E-5, initializer_range=0.02, **kwargs):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 2E-3
| 413 | 0 |
"""simple docstring"""
import pprint
import requests
__lowerCAmelCase : Optional[Any] = '''https://zenquotes.io/api'''
def __lowerCAmelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __lowerCAmelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__lowerCAmelCase : Dict = random_quotes()
pprint.pprint(response)
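# Usage sketch (illustrative; hits the live API, so a network connection is
# required). The "q"/"a" keys assume the current zenquotes response schema.
if __name__ == "__main__":
    quote = random_quotes()[0]
    print(f"{quote.get('q')} - {quote.get('a')}")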
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""Processor wrapping an image processor, a language tokenizer and a QFormer tokenizer."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwarded to the language tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwarded to the language tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
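# Minimal sketch (illustrative, plain dicts): how __call__ above namespaces the
# QFormer tokenizer's output before merging, so both sets of ids coexist in one
# encoding. `merge_encodings` is a hypothetical stand-in, not transformers API.
def merge_encodings(text_enc: dict, qformer_enc: dict) -> dict:
    merged = dict(text_enc)
    merged["qformer_input_ids"] = qformer_enc["input_ids"]
    merged["qformer_attention_mask"] = qformer_enc["attention_mask"]
    return merged
assert merge_encodings(
    {"input_ids": [[1, 2]]}, {"input_ids": [[3]], "attention_mask": [[1]]}
)["qformer_input_ids"] == [[3]]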
| 58 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""Constructs a DPT image processor."""
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        output_size = get_resize_output_image_size(
            image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
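# Worked example (illustrative, not part of the original module): with
# keep_aspect_ratio=True, a 480x640 image targeted at 384x384 keeps its aspect
# ratio by fitting the height, then snaps both sides to multiples of 32.
if __name__ == "__main__":
    _image = np.zeros((3, 480, 640))
    assert get_resize_output_image_size(_image, (384, 384), True, 32) == (384, 512)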
| 710 |
"""Convert 3D points to a 2D projection and rotate them about an axis."""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane with a simple perspective model."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the point (x, y, z) about the given axis by ``angle``."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"""{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
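# Worked check (illustrative): with distance 10 and scale 10, the point (1, 2, 3)
# projects to (100/13, 200/13), roughly (7.6923, 15.3846).
if __name__ == "__main__":
    px, py = convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
    assert abs(px - 100 / 13) < 1e-9 and abs(py - 200 / 13) < 1e-9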
| 432 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    r"""Configuration for the TrOCR decoder model."""
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    """Pick the best available torch device, warning about the flaky MPS backend."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_pil(img):
    """Display an image without axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
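# Usage sketch (illustrative): freezing a module stops gradient updates for all of
# its parameters.
if __name__ == "__main__":
    layer = torch.nn.Linear(2, 2)
    freeze_module(layer)
    assert not any(p.requires_grad for p in layer.parameters())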
| 43 | 1 |
'''simple docstring'''
from manim import *
class Stage5(Scene):
    def construct(self):
        # Memory cells: a full-size cell, a fill overlay, and a smaller "meta" cell for the disk
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)

        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))

        self.play(FadeOut(step_6))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7),
            FadeOut(a, run_time=0.5),
        )

        step_8 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])

        self.play(Write(step_8, run_time=3), MoveToTarget(input))

        self.wait()
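# Render sketch (assumes manim is installed and this scene is saved as stage_5.py;
# the file name is an assumption on my part):
#   manim -pql stage_5.py Stage5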
| 343 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check that the arguments in `__init__` of `config_class` are used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 343 | 1 |
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
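# A minimal modular-arithmetic variant (my own sketch, not part of the original
# solution): keeping only the last ten digits at every step avoids building the
# huge intermediate integers; zfill restores any leading zeros.
def solution_mod() -> str:
    modulus = 10**10
    total = 0
    for i in range(1, 1_001):
        total = (total + pow(i, i, modulus)) % modulus
    return str(total).zfill(10)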
if __name__ == "__main__":
print(solution())
| 430 |
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """
    Return the maximum possible sum amongst all non-empty subsequences.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
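# Worked example: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the optimal subsequence
# keeps every positive element (1 + 4 + 2 + 1 + 4), so
# max_subsequence_sum returns 12.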
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 612 | 0 |
'''simple docstring'''
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the
        Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter corresponding to the position [index1, index2] in
        the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
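# Round-trip sketch (assumes the BifidCipher class above; 'j' is folded into
# 'i' by the 5x5 Polybius square, so messages without 'j' survive unchanged):
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"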
| 211 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 211 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__snake_case =["""small""", """medium""", """large"""]
__snake_case ="""lm_head.decoder.weight"""
__snake_case ="""lm_head.weight"""
def a_ ( lowerCamelCase : str , lowerCamelCase : str ):
lowerCAmelCase = torch.load(lowerCamelCase )
lowerCAmelCase = d.pop(lowerCamelCase )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
torch.save(lowerCamelCase , os.path.join(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
__snake_case =parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__snake_case =os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
__snake_case =F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
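# Usage sketch (assumes the original DialoGPT `*_ft.pkl` checkpoints have been
# downloaded into --dialogpt_path; the script name below is hypothetical):
#   python convert_dialogpt_checkpoint.py --dialogpt_path .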
| 133 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case ="""
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__snake_case ="""\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
__snake_case ="""
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
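# e.g. compute_em(predictions=["About 95 species."], references=[["about 95 species"]])
# returns 100.0, because normalize_answer strips punctuation, articles and casing.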
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 133 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into a temporary directory for the test session.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 134 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
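# Minimal usage sketch (the checkpoint id is a real hub repo; the call pattern
# assumes the classes restored above):
#   config = MobileNetV2Config.from_pretrained("google/mobilenet_v2_1.0_224")
#   onnx_config = MobileNetV2OnnxConfig(config, task="image-classification")
#   list(onnx_config.outputs.keys())  # -> ["logits"]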
| 134 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
 | 494 | '''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
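# Examples: 5 -> 25 and 76 -> 5776 are automorphic (the square ends in the
# number itself); 7 -> 49 is not.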
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 494 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
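# Usage sketch: the iterator yields the sum of all node values once.
#   root = Node(10)
#   root.left, root.right = Node(5), Node(-3)
#   assert next(iter(BinaryTreeNodeSum(root))) == 12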
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi with the Monte Carlo method: draw points uniformly in the
    square [-1, 1] x [-1, 1] and count how many land inside the unit circle."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a real-valued function over
    [min_value, max_value]: the mean function value times the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on f(x) = x, where the area is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Area under the curve y = sqrt(4 - x^2) for x in [0, 2] equals pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 | 0 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # Normalize both embedding matrices row-wise, then take the pairwise dot products.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 421 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number, i.e. n = k(3k - 1)/2 for some integer k."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Return the minimal difference of two pentagonal numbers P_j and P_k such
    that both their sum and their difference are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
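# For reference, the published answer to Project Euler problem 44 is 5482660,
# which this search finds well within the default limit of 5000 indices.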
if __name__ == "__main__":
print(F'''{solution() = }''')
| 421 | 1 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 352 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash to
    compare windows in O(1) amortized time per position."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
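# The update above is the standard rolling-hash step: with
# modulus_power == alphabet_size**(p_len - 1) (mod modulus), the hash of the
# window starting at i + 1 is obtained from the window at i by removing the
# contribution of text[i] and appending text[i + p_len], all modulo `modulus`.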
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 352 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 204 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each succeeding with probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
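# Sanity check for the example below: C(4, 2) * 0.75**2 * 0.25**2
# = 6 * 0.5625 * 0.0625 = 0.2109375.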
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 204 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 707 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_SCREAMING_SNAKE_CASE = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
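        expected_boxes = _SCREAMING_SNAKE_CASE  # bind the boxes list under a distinct name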
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 569 | 0 |
"""Flax building blocks (upsample, downsample, resnet) used by diffusion UNets."""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour upsampling by a factor of 2 in both spatial dims
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
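# Minimal usage sketch (illustrative only; shapes follow Flax's NHWC convention):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   hidden = jnp.ones((1, 8, 8, 32))
#   temb = jnp.ones((1, 128))
#   params = block.init(jax.random.PRNGKey(0), hidden, temb)
#   out = block.apply(params, hidden, temb)  # shape (1, 8, 8, 64)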
| 51 |
"""Fast tokenizer class for NLLB."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__a = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).

    The language codes in `FAIRSEQ_LANGUAGE_CODES` are registered as additional special
    tokens; `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens` decide where
    the language code and EOS token are placed around the encoded text.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
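# Usage sketch (checkpoint name taken from the pretrained map above):
#   tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tokenizer("Hello world", return_tensors="pt")
# In the default (non-legacy) mode the encoding is [src_lang_code] + tokens + [eos].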
| 374 | 0 |
"""BioGPT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
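# Example: a default configuration matching the released microsoft/biogpt sizes.
#   config = BioGptConfig()
#   (config.hidden_size, config.num_hidden_layers)  # -> (1024, 24)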
| 717 |
"""Tests for the CLIP processor."""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
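# Note: CLIPProcessor simply routes `text` to the tokenizer and `images` to the
# image processor, so a combined call yields input_ids/attention_mask plus
# pixel_values in a single BatchEncoding, as the tests above verify.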
| 605 | 0 |
"""Maclaurin series approximations of sine and cosine."""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
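# The truncated series being evaluated:
#   sin(x) ~ sum_{r=0}^{accuracy-1} (-1)^r * x**(2r + 1) / (2r + 1)!
#   cos(x) ~ sum_{r=0}^{accuracy-1} (-1)^r * x**(2r) / (2r)!
# Reducing theta modulo 2*pi first keeps |theta| small, so few terms are needed.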
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))

    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 601 |
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
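# This placeholder is imported in place of the real scheduler when `torch` or
# `torchsde` is missing; instantiating it (or calling from_config/from_pretrained)
# raises an error from requires_backends naming the missing packages.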
| 601 | 1 |
"""Convert BLIP checkpoints from the original repository to the transformers format."""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
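# Example invocation (script name and output path are illustrative):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-converted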
| 717 |
"""Script to close stale issues on the huggingface/accelerate repository."""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 521 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
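# The integration check above reads as (batch, num_audio_channels, frames, mel bins):
# one clip, one audio channel, 192 spectrogram frames and feature_size = 128 mel bins.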
| 54 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
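# Illustrative usage (not part of the original script): a typical command line for
# the converter above. All paths are hypothetical placeholders; note that the
# tokenizer is built from "vocab.txt" expected next to the checkpoint, because the
# script strips the trailing "model.ckpt" (10 characters) from --tf_checkpoint_path.
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output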
| 77 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict):
    # Split the dataset into features and target
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main():
    # Load the California housing dataset and split it
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
    print(f"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main() | 714 |
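# Illustrative addition (not part of the gradient-boosting file above): a minimal
# sketch scoring the same XGBRegressor setup with k-fold cross-validation instead
# of a single train/test split. Assumes scikit-learn and xgboost are installed;
# the fold count and scoring choice are arbitrary examples.
def cross_validated_mae() -> float:
    import numpy as np
    from sklearn.datasets import fetch_california_housing
    from sklearn.model_selection import cross_val_score
    from xgboost import XGBRegressor

    housing = fetch_california_housing()
    model = XGBRegressor(verbosity=0, random_state=42)
    # sklearn reports negated MAE for "neg_mean_absolute_error", so flip the sign
    scores = cross_val_score(
        model, housing.data, housing.target, cv=5, scoring="neg_mean_absolute_error"
    )
    return float(-np.mean(scores))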
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    '''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('Protein models do not support embedding resizing.' )
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip('Protein models do not support embedding resizing.' )
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 0 |
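# Illustrative usage (not part of the original test file above): a minimal
# masked-LM forward pass mirroring the slow integration test. Requires
# TensorFlow and network access to download the checkpoint.
def esm_masked_lm_demo():
    model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    logits = model(input_ids)[0]
    return logits.shape  # expected (1, 6, 33): batch, sequence length, vocab size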
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__(self) -> str:
        return f"{self.data}"
    def get_data(self) -> int:
        return self.data
    def get_next(self):
        return self.next
    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head
    def __iter__(self):
        return self
    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
    def __iter__(self):
        return LinkedListIterator(self.head )
    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert(self, value: int) -> None:
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found' )
    def delete_value(self, value):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None
    def is_empty(self):
        return self.head is None
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
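# Illustrative usage (not part of the original module): exercising the doubly
# linked list defined above, assuming the de-obfuscated class and method names
# restored in this file.
def demo_linked_list() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)                       # 1 2 3
    linked_list.set_head(Node(0))                       # 0 1 2 3
    linked_list.insert_at_position(position=3, value=9)  # 0 1 9 2 3
    assert str(linked_list) == "0 1 9 2 3"
    linked_list.delete_value(9)                         # 0 1 2 3
    assert 9 not in linked_list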
| 375 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : str = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name) -> MobileNetVaConfig:
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.' )
    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
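# Illustrative usage (not part of the original script): classifying the demo COCO
# image with a converted checkpoint. "mobilenet_v1_dump" is a hypothetical output
# folder produced by convert_movilevit_checkpoint above.
def run_converted_model(dump_folder="mobilenet_v1_dump"):
    model = MobileNetVaForImageClassification.from_pretrained(dump_folder)
    image_processor = MobileNetVaImageProcessor.from_pretrained(dump_folder)
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(-1))]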
| 375 | 1 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    '''simple docstring'''
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = 'k-diffusion'
                    elif backend == "invisible_watermark":
                        backend = 'invisible-watermark'
assert backend in deps, F"""{backend} is not in the deps table!"""
| 179 | '''simple docstring'''
def solution(power: int = 1000) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
print("""Sum of the digits is: """, result)
| 179 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True ,)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDebertaVaModel,
            '''fill-mask''': TFDebertaVaForMaskedLM,
            '''question-answering''': TFDebertaVaForQuestionAnswering,
            '''text-classification''': TFDebertaVaForSequenceClassification,
            '''token-classification''': TFDebertaVaForTokenClassification,
            '''zero-shot''': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
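# Illustrative usage (not part of the original tests): running the same
# checkpoint as the slow integration test above with an all-ones attention mask.
def deberta_va_demo():
    model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
    input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
    attention_mask = tf.ones_like(input_ids)
    return model(input_ids, attention_mask=attention_mask)[0]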
| 36 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = """regnet"""
    main_input_name = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = r'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
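# Illustrative usage (not part of the original module): image classification with
# the TFRegNetForImageClassification head defined above, assuming the public
# facebook/regnet-y-040 checkpoint is available.
def regnet_demo(pixel_values: tf.Tensor) -> tf.Tensor:
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    outputs = model(pixel_values)  # pixel_values in NCHW: (batch, 3, 224, 224)
    return tf.argmax(outputs.logits, axis=-1)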
| 3 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs,):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
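# Illustrative usage (not part of the original module): constructing a small DPR
# configuration and reading back a couple of fields.
def dpr_config_demo() -> None:
    config = DPRConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    assert config.model_type == "dpr"
    assert config.projection_dim == 0  # 0 means "no projection layer"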
| 323 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
_lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
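# Illustrative addition (not part of the original file): bitonic sort only works
# on power-of-two input lengths; a tiny self-check under that assumption.
def bitonic_demo() -> None:
    data = [12, 42, -21, 17, 23, 18, 9, -5]  # length 8 == 2**3
    bitonic_sort(data, 0, len(data), 1)
    assert data == sorted(data)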
| 323 | 1 |
"""simple docstring"""
def print_max_activities(start, finish):
    """simple docstring"""
    n = len(finish)
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = [1, 3, 0, 5, 8, 5]
__magic_name__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
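# Illustrative addition (not part of the original file): the greedy selection
# above assumes activities are already sorted by finish time; this sketch sorts
# first, then selects, and returns the chosen indices instead of printing them.
def select_activities(start, finish):
    order = sorted(range(len(finish)), key=lambda idx: finish[idx])
    selected = [order[0]]
    for idx in order[1:]:
        if start[idx] >= finish[selected[-1]]:
            selected.append(idx)
    return selected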
| 129 |
"""simple docstring"""
def add(first, second):
    """simple docstring"""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = int(input("""Enter the first number: """).strip())
__magic_name__ = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }')
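# Illustrative addition (not part of the original file): the carry loop above
# never terminates for negative inputs, because Python integers are unbounded.
# A common workaround is to emulate 32-bit two's-complement arithmetic.
def add_32bit(first: int, second: int) -> int:
    mask = 0xFFFFFFFF
    while second != 0:
        first, second = (first ^ second) & mask, ((first & second) << 1) & mask
    # reinterpret the 32-bit result as a signed integer
    return first if first <= 0x7FFFFFFF else first - 0x100000000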
| 129 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowercase )
def load_adapter(full_name, value, adapter, unused_weights):
    """simple docstring"""
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
            adapter.proj_layer_norm.bias.data = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
            adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
            adapter.proj.bias.data = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
            adapter.proj.weight.data = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id , int ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
a__ = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
a__ = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(_lowercase )
def make_linear_from_emb(emb):
    """Build a linear layer that shares its weights with an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
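# Usage sketch (illustrative, not from the original script): sharing a
# decoder's token-embedding weights with an output projection.
#
#   lm_head = make_linear_from_emb(hf_decoder.model.decoder.embed_tokens)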
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004  # mBART-50 language token used by the original script
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
    parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whether to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250_004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 394 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that rounds images down to a multiple of `size_divisor` and rescales them."""

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
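# Usage sketch (illustrative; assumes Pillow and PyTorch are available):
#
#   processor = GLPNImageProcessor(size_divisor=32)
#   batch = processor.preprocess(images=pil_image, return_tensors="pt")
#   # batch["pixel_values"] heights/widths are rounded down to multiples of 32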
| 394 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (given as a string in `variable`) via the Newton-Raphson method."""
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
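# The loop above implements the damped Newton step
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# with m = multiplicity. For f(x) = x**2 - 2 and m = 1, starting from x = 2 the
# iterates are 1.5, 1.41666..., converging quickly to sqrt(2).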
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'''{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 80 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
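# Sketch of a concrete command (illustrative assumption, not part of this
# module); `parser` is the sub-parsers object from ArgumentParser.add_subparsers().
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")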
| 407 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity: size of the intersection over size of the union."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
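    # For the sets above the intersection is {'c', 'd', 'e'} (3 elements) and
    # the union has 8 elements, so this prints 3 / 8 = 0.375.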
| 576 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for ProphetNet (WordPiece-based; no Rust tokenizer)."""

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
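    # WordPiece tokenization is greedy longest-match-first: "unwanted" is split
    # into the longest vocabulary prefix "un" plus continuation pieces "##want"
    # and "##ed", while a word with an unmatchable span ("unwantedX") collapses
    # to the single token [UNK].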
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 576 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 35 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing with a cooling temperature that occasionally accepts worse neighbors."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
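# A worsening move (change < 0 after the sign flip) is accepted with probability
# e**(change / current_temp): with change = -1 that is ~0.99 at T = 100 but only
# ~0.37 at T = 1, so exploration shrinks as the temperature cools.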
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 218 | 0 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
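# The expected input is a whitespace-separated edge list, one edge per line,
# e.g. (illustrative contents; format inferred from the parser above):
#
#   a b 20
#   a c 18
#   b c 10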
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node named in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of a tour; each neighbour carries its total distance as its last element."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Repeatedly move to the best non-tabu neighbour, keeping the best tour ever seen."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 706 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """Builds small RegNet configs and inputs for the TF model tests below."""

    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Tests TFRegNetModel and TFRegNetForImageClassification."""

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 207 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (pass the unknown one as 0), compute the missing quantity."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
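# Example (P = V * I rearranged): electric_power(voltage=0, current=2, power=4)
# solves V = P / I and returns result(name='voltage', value=2.0).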
| 101 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """A non-negative integer is a power of two iff it has at most one set bit."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
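# Example: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0 and is_power_of_two(8)
# is True; 6 & 5 == 0b100 != 0, so is_power_of_two(6) is False.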
| 320 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular (periodic) convolution of two 1-D signals via a circulant matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
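# For the default signals [2, 1, 2, -1] and [1, 2, 3, 4] the circular
# convolution is [10.0, 10.0, 6.0, 14.0]; e.g. the first entry is
# 2*1 + 1*4 + 2*3 + (-1)*2 = 10.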
if __name__ == "__main__":
doctest.testmod()
| 712 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace `original_name` in `key` while shifting the block number down by `offset`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
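# Illustration with a hypothetical key: replace_key_with_offset("2.1.mlp.fc1.weight",
# offset=1, original_name="mlp.fc1", new_name="output.conv1") finds "mlp" at
# index 2, parses block 2 / layer 1, and returns "block.1.1.output.conv1.weight".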
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys onto transformers' naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Fetch the standard COCO test image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer checkpoint into the transformers design."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 472 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval DataLoaders for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCamelCase : Tuple = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train BERT on MRPC and compute metrics correctly across distributed processes."""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
__lowerCamelCase : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , UpperCAmelCase )
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCAmelCase , default=UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__lowerCamelCase : List[Any] = parser.parse_args()
__lowerCamelCase : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
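# A minimal sketch (not part of the original script) of the shortcut mentioned in
# the eval loop above: `Accelerator.gather_for_metrics` drops the duplicated
# samples of the final distributed batch automatically, so the manual
# `samples_seen` bookkeeping disappears entirely:
#
#     for batch in eval_dataloader:
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#         metric.add_batch(predictions=predictions, references=references)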
| 519 |
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
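# A small worked example (added for illustration):
#
#     >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#     >>> maxpooling(arr, size=2, stride=2)
#     array([[ 6.,  8.],
#            [14., 16.]])
#     >>> avgpooling(arr, size=2, stride=2)
#     array([[ 3.,  5.],
#            [11., 13.]])
#
# (avgpooling truncates each window average to an int before storing it, so
# 3.5 becomes 3, 5.5 becomes 5, and so on.)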
| 519 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: units number of the flattened layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        calculate the gradient from the data slice of the pool layer
        pd_pool: list of matrix
        out_map: the shape of the data slice (size_map * size_map)
        return: pd_all: list of matrix, [num, size_map, size_map]
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient ---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process ---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of the image after the convolution process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
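# A standalone sketch (not part of the original module; names are illustrative)
# of the per-kernel "slice -> weighted sum minus threshold -> sigmoid" step that
# CNN.convolute performs, assuming a square input and stride 1:
import numpy as np


def conv2d_single_kernel(data: np.ndarray, kernel: np.ndarray, threshold: float = 0.0) -> np.ndarray:
    # slide the kernel over every valid window of the input
    size = kernel.shape[0]
    out_size = data.shape[0] - size + 1
    out = np.zeros((out_size, out_size))
    for i in range(out_size):
        for j in range(out_size):
            net = np.sum(data[i : i + size, j : j + size] * kernel) - threshold
            out[i, j] = 1 / (1 + np.exp(-net))  # same sigmoid as CNN.sig
    return out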
| 712 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of current worldwide COVID-19 statistics scraped from worldometers.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 658 | 0 |
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes of the given node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
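# A small design note (added, not in the original): `node_queue` is a plain list,
# so `pop(0)` is O(n). For larger grids, `collections.deque` gives O(1) pops:
#
#     from collections import deque
#
#     queue = deque([start_node])
#     current = queue.popleft()   # O(1) instead of list.pop(0)
#     queue.append(successor)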
| 321 |
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
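# A minimal usage sketch (illustrative, not part of the module). The
# "facebook/rag-token-nq" checkpoint ships both sub-tokenizers in the expected
# subfolders:
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     tokenizer.save_pretrained("./rag_tok")  # writes question_encoder_tokenizer/ and generator_tokenizer/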
| 321 | 1 |
class EditDistance:
    """
    Computes the Levenshtein (edit) distance between two strings, both
    top-down (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
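# A quick non-interactive check (added for illustration): the classic pair
# "intention" -> "execution" needs 5 edits.
#
#     solver = EditDistance()
#     assert solver.min_dist_top_down("intention", "execution") == 5
#     assert solver.min_dist_bottom_up("intention", "execution") == 5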
| 700 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Image processor that rescales pixel values and pads the bottom/right of images
    so that height and width become multiples of `pad_size`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
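# A worked example of the padding arithmetic in `pad` above (illustrative): with
# pad_size=8, a 53x41 image gets
#     pad_height = (53 // 8 + 1) * 8 - 53 = 3   -> padded height 56
#     pad_width  = (41 // 8 + 1) * 8 - 41 = 7   -> padded width  48
# Note that a dimension already divisible by 8 still gains a full extra block of 8.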
| 81 | 0 |
"""simple docstring"""
def snake_case ( _a: int , _a: int )-> str:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
lowerCamelCase__ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowerCAmelCase_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510 |
from math import factorial


def solution(n: int = 20) -> int:
    """
    Returns the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
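# Sanity check (added for illustration): solution(20) evaluates the central
# binomial coefficient C(40, 20) = 137846528820, the published answer to
# Project Euler problem 15.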
| 283 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
def __init__( self , A_ , A_=None , A_=None , A_=None , A_="resnet50" , A_=3 , A_=32 , A_=3 , A_=True , A_=True , ) -> Any:
"""simple docstring"""
_lowercase: Optional[int] = parent
_lowercase: List[str] = out_indices if out_indices is not None else [4]
_lowercase: Union[str, Any] = stage_names
_lowercase: Tuple = out_features
_lowercase: Tuple = backbone
_lowercase: List[str] = batch_size
_lowercase: Optional[Any] = image_size
_lowercase: Any = num_channels
_lowercase: Optional[Any] = use_pretrained_backbone
_lowercase: Optional[int] = is_training
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase: Dict = self.get_config()
return config, pixel_values
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase_ ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowercase: Any = TimmBackbone(config=A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_lowercase: int = model(A_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: str = self.prepare_config_and_inputs()
_lowercase , _lowercase: List[str] = config_and_inputs
_lowercase: Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase_ = (TimmBackbone,) if is_torch_available() else ()
UpperCamelCase_ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: Union[str, Any] = TimmBackboneModelTester(self )
_lowercase: int = ConfigTester(self , config_class=A_ , has_text_modality=A_ )
def lowercase_ ( self ) -> int:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: List[Any] = '''resnet18'''
_lowercase: Tuple = '''microsoft/resnet-18'''
_lowercase: Union[str, Any] = AutoBackbone.from_pretrained(A_ , use_timm_backbone=A_ )
_lowercase: List[str] = AutoBackbone.from_pretrained(A_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_lowercase: Any = AutoBackbone.from_pretrained(A_ , use_timm_backbone=A_ , out_indices=[1, 2, 3] )
_lowercase: Optional[int] = AutoBackbone.from_pretrained(A_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowercase_ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: str = model_class(A_ )
_lowercase: List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase: Dict = [*signature.parameters.keys()]
_lowercase: Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase: int = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: Union[str, Any] = True
_lowercase: int = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase: Any = self.all_model_classes[0]
_lowercase: Union[str, Any] = model_class(A_ )
model.to(A_ )
_lowercase: str = self._prepare_for_class(A_ , A_ )
_lowercase: Optional[Any] = model(**A_ )
_lowercase: List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase: str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase: Dict = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=A_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: str = model_class(A_ )
model.to(A_ )
model.eval()
_lowercase: Dict = model(**A_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase: List[Any] = copy.deepcopy(A_ )
_lowercase: Any = None
_lowercase: Dict = model_class(A_ )
model.to(A_ )
model.eval()
_lowercase: Union[str, Any] = model(**A_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_lowercase: str = copy.deepcopy(A_ )
_lowercase: Union[str, Any] = False
_lowercase: List[str] = model_class(A_ )
model.to(A_ )
model.eval()
_lowercase: str = model(**A_ )
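# A minimal usage sketch (illustrative) of the timm-backbone API exercised by the
# tests above; "resnet18" is a valid timm architecture name:
#
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
#     # backbone.channels lists the feature dimensions of the selected stages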
| 272 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number | (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number & ~(1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number ^ (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold.

    Args:
        accelerator (`Accelerator`): The main `Accelerator` object.
        dataset (`DatasetDict`): The dataset containing train and validation splits.
        train_idxs (list of `int`): The split indices for the training dataset.
        valid_idxs (list of `int`): The split indices for the validation dataset.
        batch_size (`int`): The size of the minibatch, 16 by default.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
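# A minimal standalone sketch (not part of the script) of the fold-ensembling
# step above: per-fold logits are summed and divided by the number of folds
# (i.e. averaged) before the argmax, a simple soft-voting ensemble.
#
#     import torch
#
#     fold_logits = [torch.randn(8, 2) for _ in range(3)]  # one logits tensor per fold
#     avg = torch.stack(fold_logits, dim=0).mean(dim=0)    # same as .sum(0).div(3)
#     preds = avg.argmax(dim=-1)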
| 90 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
A_ = UnCLIPImageVariationPipeline
A_ = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
A_ = IMAGE_VARIATION_BATCH_PARAMS
A_ = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
A_ = False
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 100
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__a )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : List[Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
__a : Optional[int] = UnCLIPTextProjModel(**__a )
return model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Union[str, Any] = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
__a : Any = UNetaDConditionModel(**__a )
return model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(1 )
__a : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
    def get_dummy_components(self):
        '''simple docstring'''
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range', prediction_type='epsilon', num_train_timesteps=1000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='epsilon', num_train_timesteps=1000, )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs['image'] = [
            pipeline_inputs['image'],
            pipeline_inputs['image'],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs['image'] = [
            tuple_pipeline_inputs['image'],
            tuple_pipeline_inputs['image'],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        '''simple docstring'''
        device = torch.device('cpu')

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop('image')
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )

    def test_inference_batch_consistent(self):
        '''simple docstring'''
        additional_params_copy_to_batched_inputs = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        '''simple docstring'''
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        '''simple docstring'''
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        '''simple docstring'''
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        '''simple docstring'''
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy')
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
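# Illustrative only (not part of the original file): a minimal sketch of how the
# pipeline under test is used for image variations. The checkpoint matches the
# integration test above; the input path is a hypothetical placeholder.
#
# import torch
# from diffusers import UnCLIPImageVariationPipeline
# from diffusers.utils import load_image
#
# pipe = UnCLIPImageVariationPipeline.from_pretrained(
#     "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
# ).to("cuda")
# image = load_image("path/to/input.png")  # hypothetical local file
# variation = pipe(image, generator=torch.Generator("cpu").manual_seed(0), output_type="np").images[0]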
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
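# Illustrative only (not part of the original file): composing an AltCLIPConfig
# from the two sub-configs via the classmethod above. All values shown are the
# defaults; pick real ones for an actual model.
#
# text_config = AltCLIPTextConfig()        # XLM-R style text tower
# vision_config = AltCLIPVisionConfig()    # CLIP ViT style vision tower
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
# assert config.to_dict()["text_config"]["project_dim"] == 768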
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the downloaded bytes are an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
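# Illustrative only (not part of the original file): the script is meant to be
# invoked from the command line, e.g. (assuming it is saved as retrieve.py):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_images --num_class_images 200
#
# which fills ./class_images/images/ plus caption.txt, urls.txt and images.txt.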
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
    'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}


class LukeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
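# Illustrative only (not part of the original file): instantiating the config with
# a smaller entity vocabulary; unspecified fields keep the defaults above.
#
# config = LukeConfig(entity_vocab_size=10000, use_entity_aware_attention=False)
# assert config.entity_emb_size == 256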
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
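# Illustrative only (not part of the original file): the suite is gated behind an
# environment variable, so a typical local run would look roughly like
#
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker/test_single_node_gpu.py
#
# (the exact test-file path is an assumption; AWS credentials and the `sm_env`
# fixture must be available).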
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
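# Illustrative only (not part of the original file): fetching a community pipeline
# class by name. "clip_guided_stable_diffusion" is a known diffusers community
# script, but any module under examples/community resolves the same way.
#
# pipeline_cls = get_class_from_dynamic_module(
#     "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
# )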
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
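# Illustrative only (not part of the original file): typical invocation, assuming
# the script is saved as check_ops.py and points at a TensorFlow SavedModel file:
#
#   python check_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict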
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
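# Illustrative only (not part of the original file): with the `_LazyModule` wiring
# above, submodules are materialized on first attribute access rather than at
# import time, so the torch-dependent modeling code is only imported when used.
#
# from transformers.models.fnet import FNetConfig  # cheap: configuration only
# config = FNetConfig(num_hidden_layers=2)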
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
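# Illustrative only (not part of the original file): the fairseq offset documented
# above shifts every sentencepiece id by one so that <s>/<pad>/</s>/<unk> occupy
# ids 0-3, mirroring the original fairseq vocabulary.
#
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# assert tok.convert_tokens_to_ids("<pad>") == 1
# assert tok.encode("hello")[0] == tok.sep_token_id  # a </s> is prepended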
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'DownBlock2D',
    )
    up_block_types: Tuple[str] = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D')
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """simple docstring"""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        """simple docstring"""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.')
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False, ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        """simple docstring"""
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1):]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample)
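# Illustrative only (not part of the original file): initializing parameters with
# `init_weights` and running one forward pass with the default (Stable
# Diffusion sized) configuration; commented because the full model is large.
#
# import jax
# import jax.numpy as jnp
# model = FlaxUNet2DConditionModel()
# params = model.init_weights(jax.random.PRNGKey(0))
# sample = jnp.zeros((1, model.in_channels, model.sample_size, model.sample_size))
# text_emb = jnp.zeros((1, 77, model.cross_attention_dim))
# out = model.apply({"params": params}, sample, jnp.array([10]), text_emb)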
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """simple docstring"""
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock ):
pass
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None ) is None
    with patch_submodule(_test_patching, '''len''', mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock ):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock ):
pass
| 19 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.json'''}
__lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__lowerCAmelCase : Union[str, Any] = {'''mgp-str''': 27}
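# positional embeddings cap MGP-STR inputs at 27 tokens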
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    '''simple docstring'''
    snake_case__ : Any = VOCAB_FILES_NAMES
    snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self :Dict , vocab_file :Any , unk_token :Union[str, Any]="[GO]" , bos_token :Optional[Any]="[GO]" , eos_token :Dict="[s]" , pad_token :int="[GO]" , **kwargs :Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self :Optional[Any] ) -> int:
        '''simple docstring'''
        return len(self.vocab )
    def get_vocab( self :Any ) -> Any:
        '''simple docstring'''
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self :int , text :Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self :Optional[Any] , token :List[str] ) -> Union[str, Any]:
        '''simple docstring'''
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self :Optional[int] , index :str ) -> Union[str, Any]:
        '''simple docstring'''
        return self.decoder.get(index )
    def save_vocabulary( self :str , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        return (vocab_file,)
| 158 |
"""simple docstring"""
from __future__ import annotations
__lowerCAmelCase : Optional[int] = 8.988E9  # units = N * m^2 * C^-2
def __snake_case ( force , chargea , chargeb , distance ) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
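# the module below extends the BigBird QA head with a 5-way answer-category classifier on the pooled output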
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
    '''simple docstring'''
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup( self : int ) -> None:
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self : Union[str, Any] , *args : Tuple , **kwargs : Tuple ) -> Optional[int]:
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    '''simple docstring'''
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits ,start_labels ,end_logits ,end_labels ,pooled_logits ,pooled_labels ) -> int:
    """simple docstring"""
    def cross_entropy(logits ,labels ,reduction=None ):
        # one-hot encode the labels and take the negative log-likelihood per position
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("f4" )
        logits = jax.nn.log_softmax(logits ,axis=-1 )
        loss = -jnp.sum(labels * logits ,axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy ,reduction=jnp.mean )
    start_loss = cross_entropy(start_logits ,start_labels )
    end_loss = cross_entropy(end_logits ,end_labels )
    pooled_loss = cross_entropy(pooled_logits ,pooled_labels )
    # average the start-span, end-span and answer-category losses
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    '''simple docstring'''
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0_095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self : Union[str, Any] ) -> None:
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    '''simple docstring'''
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs
    def __call__( self : Any , features : Union[str, Any] ) -> Optional[Any]:
        batch = self.collate_fn(features )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] ) -> Optional[Any]:
_UpperCamelCase : str = self.fetch_inputs(features["input_ids"] )
_UpperCamelCase : str = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
    def fetch_inputs( self : Dict , input_ids : str ) -> Union[str, Any]:
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self : Dict , input_ids : Optional[int] ) -> List[str]:
        # build an all-ones attention mask, then right-pad ids with pad_id and the mask with 0 up to max_length
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset ,batch_size ,seed=None ) -> Optional[Any]:
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    # integer division drops the trailing incomplete batch so every batch shards evenly across devices
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap ,axis_name="batch" )
def train_step( state ,drp_rng ,**model_inputs ) -> int:
    """simple docstring"""
    def loss_fn(params ):
        start_labels = model_inputs.pop("start_labels" )
        end_labels = model_inputs.pop("end_labels" )
        pooled_labels = model_inputs.pop("pooled_labels" )
        outputs = state.apply_fn(**model_inputs ,params=params ,dropout_rng=drp_rng ,train=True )
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits ,start_labels ,end_logits ,end_labels ,pooled_logits ,pooled_labels ,)
    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    # value_and_grad evaluates the loss and its parameter gradients in a single pass
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    # average the loss and gradients across devices along the "batch" pmap axis
    metrics = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
    grads = jax.lax.pmean(grads ,"batch" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="batch" )
def val_step( state ,**model_inputs ) -> str:
    """simple docstring"""
    start_labels = model_inputs.pop("start_labels" )
    end_labels = model_inputs.pop("end_labels" )
    pooled_labels = model_inputs.pop("pooled_labels" )
    outputs = state.apply_fn(**model_inputs ,params=state.params ,train=False )
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits ,start_labels ,end_logits ,end_labels ,pooled_logits ,pooled_labels )
    metrics = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
    return metrics
class TrainState( train_state.TrainState ):
    '''simple docstring'''
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    '''simple docstring'''
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self : Any , model : List[str] , tx : Any , num_train_steps : Optional[Any] , ckpt_dir : List[str]=None ) -> Optional[int]:
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self : int , state : int , tr_dataset : str , val_dataset : Optional[Any] ) -> Union[str, Any]:
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        # one dropout RNG per local device for the pmapped train step
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=F'''Running EPOCH-{epoch}''' ):
                batch = self.data_collator(batch )
                state, metrics, drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    # log running training loss, eval loss and the current learning rate
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=state )
    def evaluate( self : Optional[int] , state : Dict , dataset : Optional[int] ) -> Union[str, Any]:
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="Evaluating ... " ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def save_checkpoint( self : str , save_dir : Tuple , state : List[Any] ) -> Optional[Any]:
        state = jax_utils.unreplicate(state )
        print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " )
        self.model_save_fn(save_dir , params=state.params )
        # serialize the optax optimizer state alongside the model weights
        with open(os.path.join(save_dir , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , "data_collator.joblib" ) )
        with open(os.path.join(save_dir , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , f )
        print("DONE" )
def restore_checkpoint( save_dir ,state ) -> Optional[Any]:
    """simple docstring"""
    print(F'''RESTORING CHECKPOINT FROM {save_dir}''' ,end=" ... " )
    with open(os.path.join(save_dir ,"flax_model.msgpack" ) ,"rb" ) as f:
        params = from_bytes(state.params ,f.read() )
    with open(os.path.join(save_dir ,"opt_state.msgpack" ) ,"rb" ) as f:
        opt_state = from_bytes(state.opt_state ,f.read() )
    args = joblib.load(os.path.join(save_dir ,"args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir ,"data_collator.joblib" ) )
    with open(os.path.join(save_dir ,"training_state.json" ) ,"r" ) as f:
        training_state = json.load(f )
    step = training_state["""step"""]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr ,init_lr ,warmup_steps ,num_train_steps ) -> Tuple:
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr ,end_value=lr ,transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr ,end_value=1e-7 ,transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
    return lr
def build_tx( lr ,init_lr ,warmup_steps ,num_train_steps ,weight_decay ) -> Optional[int]:
    """simple docstring"""
    def weight_decay_mask(params ):
        # True marks a parameter for weight decay; biases and LayerNorm scales are excluded
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr ,init_lr ,warmup_steps ,num_train_steps )
    tx = optax.adamw(learning_rate=lr ,weight_decay=weight_decay ,mask=weight_decay_mask )
    return tx, lr
| 624 |
lowerCamelCase__ : List[str] = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 12 | 0 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_ )
    return "".join(
        ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str , upper: bool , separator: str) -> str:
    try:
        string_split = split_input(text )
        if upper:
            res_str = """""".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = """""".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    return to_simple_case(text )
def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str , upper: bool) -> str:
    return to_complex_case(text , upper , """_""" )
def to_kebab_case(text: str , upper: bool) -> str:
    return to_complex_case(text , upper , """-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
| 348 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase (Pipeline ):
    def __init__( self :int , *args :List[Any] , **kwargs :Optional[Any] ) ->Any:
        super().__init__(*args , **kwargs )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self :str , top_k :List[str]=None ) ->Tuple:
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self :Any , images :Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs :int ) ->List[str]:
        return super().__call__(images , **kwargs )
    def preprocess( self :List[str] , image :Union[str, Any] ) ->Dict:
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self :Union[str, Any] , model_inputs :str ) ->Union[str, Any]:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self :Optional[Any] , model_outputs :List[str] , top_k :List[str]=5 ) ->Dict:
        # never request more classes than the model actually predicts
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 348 | 1 |
from math import sqrt
def sum_of_divisors( n ) -> int:
    """simple docstring"""
    total = 0
    # divisors come in pairs (i, n // i); count sqrt(n) only once for perfect squares
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit = 10000 ) -> int:
    """simple docstring"""
    # an amicable pair (a, b) satisfies d(a) == b and d(b) == a with a != b
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
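# e.g. solution(10000) returns 31626, the sum of all amicable numbers below 10000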
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 315 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_A : str = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset( Dataset ):
    def __init__( self : Dict , length : int = 1_0_1 ) ->Dict:
        self.length = length
    def __len__( self : Union[str, Any] ) ->Any:
        return self.length
    def __getitem__( self : str , i : str ) ->int:
        return i
class DummyDataCollator:
    def __call__( self : Tuple , features : Any ) ->Any:
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel( nn.Module ):
    def __init__( self : List[Any] ) ->Dict:
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_2_0 , 8_0 )
    def forward( self : Tuple , input_ids : Dict , labels : List[Any]=None ) ->Dict:
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore( TestCasePlus ):
    @require_torch_neuroncore
    def test_trainer( self : int ) ->Optional[Any]:
        distributed_args = F"--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F"--output_dir {output_dir}".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed( TestCasePlus ):
    @require_torch_multi_gpu
    def test_trainer( self : List[str] ) ->List[str]:
        distributed_args = F"--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F"--output_dir {output_dir}".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_01, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics ( p ) -> Dict:
            """simple docstring"""
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}" )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 315 | 1 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase__ =namedtuple("from_to", "from_ to")
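# each unit stores two factors relative to the cubic-metre reference: into it (`from_`) and out of it (`to`)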
lowerCAmelCase__ ={
"cubicmeter": from_to(1, 1),
"litre": from_to(0.0_01, 1_000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_04_54, 2_64.1_72),
"cubicyard": from_to(0.7_64_55, 1.3_07_95),
"cubicfoot": from_to(0.0_28, 35.31_47),
"cup": from_to(0.0_00_23_65_88, 42_26.75),
}
def _a ( value , from_type , to_type ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ''', '''.join(METRIC_CONVERSION ) )
    # `from_` converts the value into cubic metres; `to` then scales into the target unit
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
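# quick sanity check of the helper: _a(4, "cubicmeter", "litre") == 4 * 1 * 1000 == 4000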
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class A__( Pipeline ):
def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
| 690 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    __UpperCAmelCase : str = ['''speech''']
    def __init__( self : List[str] ,*args : Any ,**kwargs : Dict ):
        '''simple docstring'''
        requires_backends(self ,['speech'] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    __UpperCAmelCase : Union[str, Any] = ['''speech''']
    def __init__( self : Optional[int] ,*args : Tuple ,**kwargs : List[str] ):
        '''simple docstring'''
        requires_backends(self ,['speech'] )
| 229 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
__snake_case = 1
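    # a first-order (Euler-Maruyama) solver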
@register_to_config
    def __init__( self: str , num_train_timesteps: int=2000 , beta_min: float=0.1 , beta_max: float=20 , sampling_eps: float=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self: int , num_inference_steps: int , device: Union[str, torch.device] = None ):
        # sampling integrates the reverse SDE from t=1 down to sampling_eps
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self: List[Any] , score: Union[str, Any] , x: Tuple , t: Optional[Any] , generator: Dict=None ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        # Euler-Maruyama mean update; dt is negative because time runs backwards
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
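# fast-tokenizer, PyTorch and TensorFlow exports below are gated on their optional dependencies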
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
snake_case_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def lowerCAmelCase ( self : Tuple , model : List[str] , tokenizer : List[Any] , processor : List[Any] )-> Tuple:
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audioa = np.zeros((3_40_00,) )
        audio = np.zeros((1_40_00,) )
        return audio_classifier, [audioa, audio]
    def lowerCAmelCase ( self : Union[str, Any] , audio_classifier : str , examples : str )-> Any:
        audioa , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
        output = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio ( self : Optional[Any] , audio_classifier : Optional[Any] )-> List[Any]:
        import datasets
        # test with a local file
        dataset = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        audio = dataset[0]["""audio"""]["""array"""]
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
@require_torch
    def lowerCAmelCase ( self : Tuple )-> Any:
        model = """anton-l/wav2vec2-random-tiny-classifier"""
        audio_classifier = pipeline("""audio-classification""" , model=model )
        audio = np.ones((80_00,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {"""score""": 0.08_42, """label""": """no"""},
            {"""score""": 0.08_38, """label""": """up"""},
            {"""score""": 0.08_37, """label""": """go"""},
            {"""score""": 0.08_34, """label""": """right"""},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"""score""": 0.08_45, """label""": """stop"""},
            {"""score""": 0.08_44, """label""": """on"""},
            {"""score""": 0.08_41, """label""": """right"""},
            {"""score""": 0.08_34, """label""": """left"""},
        ]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def lowerCAmelCase ( self : Union[str, Any] )-> Any:
        import datasets
        model = """superb/wav2vec2-base-superb-ks"""
        audio_classifier = pipeline("""audio-classification""" , model=model )
        dataset = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
        audio = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
                {"""score""": 0.9_81, """label""": """go"""},
                {"""score""": 0.0_07, """label""": """up"""},
                {"""score""": 0.0_06, """label""": """_unknown_"""},
                {"""score""": 0.0_01, """label""": """down"""},
            ] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def lowerCAmelCase ( self : Tuple )-> int:
pass
| 517 | 0 |
import os
def largest_product( grid : Any ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
| 63 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory (*objects ):
    '''simple docstring'''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size (exception ) -> bool:
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size (function = None , starting_batch_size = 128 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    # halve the batch size and retry until the call fits in memory
                    batch_size //= 2
                else:
                    raise
    return decorator
| 670 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _UpperCAmelCase ( ProcessorMixin ):
'''simple docstring'''
__A = '''MCTCTFeatureExtractor'''
__A = '''AutoTokenizer'''
    def __init__( self : Any , feature_extractor : Any , tokenizer : List[str]) -> None:
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self : Union[str, Any] , *args : Union[str, Any] , **kwargs : List[str]) -> Any:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio" , None)
        sampling_rate = kwargs.pop("sampling_rate" , None)
        text = kwargs.pop("text" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self : Optional[int] , *args : int , **kwargs : List[Any]) -> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def pad( self : Union[str, Any] , *args : List[Any] , **kwargs : List[Any]) -> List[str]:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs)
        input_features = kwargs.pop("input_features" , None)
        labels = kwargs.pop("labels" , None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode( self : List[str] , *args : str , **kwargs : Optional[Any]) -> Optional[int]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
@contextmanager
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 709 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( LayoutLMvaImageProcessor ):
'''simple docstring'''
    def __init__( self : str , *args : List[str] , **kwargs : Union[str, Any]) -> None:
        """simple docstring"""
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 82 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name , num_meta4D_last_stage ) -> str:
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" , "convolution2" )
        else:
            new_name = old_name.replace("4" , "batchnorm_after" )
    if "network" in old_name and re.search(R"\d\.\d" , old_name ):
        two_digit_num = R'''\b\d{2}\b'''
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R"\d\.\d\d." , old_name ).group()
        else:
            match = re.search(R"\d\.\d." , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , "" )
            trimmed_name = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , "" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" , "layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" , "layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" , "linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" , "linear_out" )
            new_name = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(R".\d." , old_name ):
        new_name = old_name.replace("network" , "intermediate_stages" )
    if "fc" in new_name:
        new_name = new_name.replace("fc" , "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1" , "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" , "projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" , "layernorm" )
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name
    return new_name
def convert_torch_checkpoint( checkpoint , num_meta4D_last_stage ) -> Dict:
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def __a ( A , A , A , A ) -> int:
'''simple docstring'''
A__ = torch.load(__A , map_location="cpu" )['''model''']
A__ = EfficientFormerConfig.from_json_file(__A )
A__ = EfficientFormerForImageClassificationWithTeacher(__A )
A__ = '''_'''.join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
A__ = config.depths[-1] - config.num_metaad_blocks + 1
A__ = convert_torch_checkpoint(__A , __A )
model.load_state_dict(__A )
model.eval()
A__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
A__ = prepare_img()
A__ = 256
A__ = 224
A__ = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
A__ = processor(images=__A , return_tensors="pt" ).pixel_values
# original processing pipeline
A__ = Compose(
[
Resize(__A , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__A ),
ToTensor(),
Normalize(__A , __A ),
] )
A__ = image_transforms(__A ).unsqueeze(0 )
assert torch.allclose(__A , __A )
A__ = model(__A )
A__ = outputs.logits
A__ = (1, 1_000)
if "l1" in model_name:
A__ = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , __A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , __A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
        assert torch.allclose(logits[0, :10] , __A , atol=1E-3 )
        assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__A )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__A , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__A , )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
__UpperCAmelCase =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 337 |
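# Editor's sketch: a minimal, self-contained illustration of the rename-and-reload
# pattern the conversion script above relies on — rewrite every state-dict key,
# then hand the renamed dict to load_state_dict. The regex rules and key names
# below are illustrative assumptions, not the exact EfficientFormer mapping.
import re

import torch


def rename_state_dict_keys(state_dict, patterns):
    """Return a new state dict with keys rewritten by (regex -> replacement) rules."""
    renamed = {}
    for old_key, tensor in state_dict.items():
        new_key = old_key
        for pattern, replacement in patterns.items():
            new_key = re.sub(pattern, replacement, new_key)
        renamed[new_key] = tensor
    return renamed


toy_state_dict = {"network.0.fc1.weight": torch.zeros(4, 4)}
toy_rules = {r"^network\.": "encoder.", r"\.fc1\.": ".linear_in."}
print(list(rename_state_dict_keys(toy_state_dict, toy_rules)))
# ['encoder.0.linear_in.weight']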
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Union[str, Any]=6.0 , UpperCAmelCase : Any=None , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple="fp4" , UpperCAmelCase : str=False , **UpperCAmelCase : Union[str, Any] , ) -> str:
'''simple docstring'''
lowercase : Optional[int] =load_in_abit
lowercase : Union[str, Any] =load_in_abit
lowercase : Tuple =llm_inta_threshold
lowercase : Optional[Any] =llm_inta_skip_modules
lowercase : int =llm_inta_enable_fpaa_cpu_offload
lowercase : Dict =llm_inta_has_fpaa_weight
lowercase : str =bnb_abit_quant_type
lowercase : int =bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase : str =torch.floataa
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Tuple =getattr(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , torch.dtype ):
lowercase : Optional[int] =bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def A__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , UpperCAmelCase ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCAmelCase ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCAmelCase ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , UpperCAmelCase ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , UpperCAmelCase ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , UpperCAmelCase ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def A__ ( cls : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , **UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =cls(**UpperCAmelCase )
lowercase : Dict =[]
for key, value in kwargs.items():
if hasattr(UpperCAmelCase , UpperCAmelCase ):
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
to_remove.append(UpperCAmelCase )
for key in to_remove:
kwargs.pop(UpperCAmelCase , UpperCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def A__ ( self : List[Any] , UpperCAmelCase : Union[str, os.PathLike] ) -> Dict:
'''simple docstring'''
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
lowercase : Any =self.to_dict()
lowercase : List[Any] =json.dumps(UpperCAmelCase , indent=2 , sort_keys=UpperCAmelCase ) + '''\n'''
writer.write(UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Dict[str, Any]:
'''simple docstring'''
lowercase : Tuple =copy.deepcopy(self.__dict__ )
lowercase : Optional[Any] =str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return f'{self.__class__.__name__} {self.to_json_string()}'
def A__ ( self : List[Any] , UpperCAmelCase : bool = True ) -> str:
'''simple docstring'''
if use_diff is True:
lowercase : int =self.to_diff_dict()
else:
lowercase : List[str] =self.to_dict()
return json.dumps(UpperCAmelCase , indent=2 , sort_keys=UpperCAmelCase ) + "\n"
def A__ ( self : Any ) -> Dict[str, Any]:
'''simple docstring'''
lowercase : Any =self.to_dict()
# get the default config dict
lowercase : Union[str, Any] =BitsAndBytesConfig().to_dict()
lowercase : int ={}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase : Dict =value
return serializable_config_dict
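# Editor's sketch: a short usage example for the quantization config defined
# above. In the public (non-obfuscated) transformers API the class is
# BitsAndBytesConfig; the keyword names below assume that upstream signature.
import torch
from transformers import BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
# Mirrors the to_json_string()/to_diff_dict() round trip implemented above:
# only values that differ from the defaults are serialized.
print(quant_config.to_json_string(use_diff=True))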
| 94 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
_lowerCAmelCase = key.replace('module.encoder', 'glpn.encoder' )
if key.startswith('module.decoder' ):
_lowerCAmelCase = key.replace('module.decoder', 'decoder.stages' )
if "patch_embed" in key:
        # replace, for example, patch_embed1 with patch_embeddings.0
_lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
_lowerCAmelCase = key.replace(F'''patch_embed{idx}''', F'''patch_embeddings.{int(__lowerCamelCase )-1}''' )
if "norm" in key:
_lowerCAmelCase = key.replace('norm', 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
        # replace, for example, layer_norm1 with layer_norm.0
_lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
_lowerCAmelCase = key.replace(F'''layer_norm{idx}''', F'''layer_norm.{int(__lowerCamelCase )-1}''' )
if "layer_norm1" in key:
_lowerCAmelCase = key.replace('layer_norm1', 'layer_norm_1' )
if "layer_norm2" in key:
_lowerCAmelCase = key.replace('layer_norm2', 'layer_norm_2' )
if "block" in key:
        # replace, for example, block1 with block.0
_lowerCAmelCase = key[key.find('block' ) + len('block' )]
_lowerCAmelCase = key.replace(F'''block{idx}''', F'''block.{int(__lowerCamelCase )-1}''' )
if "attn.q" in key:
_lowerCAmelCase = key.replace('attn.q', 'attention.self.query' )
if "attn.proj" in key:
_lowerCAmelCase = key.replace('attn.proj', 'attention.output.dense' )
if "attn" in key:
_lowerCAmelCase = key.replace('attn', 'attention.self' )
if "fc1" in key:
_lowerCAmelCase = key.replace('fc1', 'dense1' )
if "fc2" in key:
_lowerCAmelCase = key.replace('fc2', 'dense2' )
if "linear_pred" in key:
_lowerCAmelCase = key.replace('linear_pred', 'classifier' )
if "linear_fuse" in key:
_lowerCAmelCase = key.replace('linear_fuse.conv', 'linear_fuse' )
_lowerCAmelCase = key.replace('linear_fuse.bn', 'batch_norm' )
if "linear_c" in key:
        # replace, for example, linear_c4 with linear_c.3
_lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
_lowerCAmelCase = key.replace(F'''linear_c{idx}''', F'''linear_c.{int(__lowerCamelCase )-1}''' )
if "bot_conv" in key:
_lowerCAmelCase = key.replace('bot_conv', '0.convolution' )
if "skip_conv1" in key:
_lowerCAmelCase = key.replace('skip_conv1', '1.convolution' )
if "skip_conv2" in key:
_lowerCAmelCase = key.replace('skip_conv2', '2.convolution' )
if "fusion1" in key:
_lowerCAmelCase = key.replace('fusion1', '1.fusion' )
if "fusion2" in key:
_lowerCAmelCase = key.replace('fusion2', '2.fusion' )
if "fusion3" in key:
_lowerCAmelCase = key.replace('fusion3', '3.fusion' )
if "fusion" in key and "conv" in key:
_lowerCAmelCase = key.replace('conv', 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
_lowerCAmelCase = key.replace('module.last_layer_depth', 'head.head' )
_lowerCAmelCase = value
return new_state_dict
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_lowerCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
_lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
_lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return image
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase=None ):
"""simple docstring"""
_lowerCAmelCase = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2], decoder_hidden_size=6_4, depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
_lowerCAmelCase = GLPNImageProcessor()
# prepare image
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__lowerCamelCase, return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
_lowerCAmelCase = torch.load(__lowerCamelCase, map_location=torch.device('cpu' ) )
# rename keys
_lowerCAmelCase = rename_keys(__lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(__lowerCamelCase, __lowerCamelCase )
# create HuggingFace model and load state dict
_lowerCAmelCase = GLPNForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# forward pass
_lowerCAmelCase = model(__lowerCamelCase )
_lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_lowerCAmelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
_lowerCAmelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_lowerCAmelCase = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], __lowerCamelCase, atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase, __lowerCamelCase ), organization='nielsr', commit_message='Add model', use_temp_dir=__lowerCamelCase, )
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase, __lowerCamelCase ), organization='nielsr', commit_message='Add image processor', use_temp_dir=__lowerCamelCase, )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
a__ : List[Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
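# Editor's sketch: the read_in_k_v step above slices a fused key/value
# projection into separate key and value matrices. The same slicing in
# isolation, with a made-up hidden size rather than a real GLPN configuration:
import torch

hidden_size = 8
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # keys stacked above values
kv_bias = torch.randn(2 * hidden_size)

k_weight, v_weight = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
k_bias, v_bias = kv_bias[:hidden_size], kv_bias[hidden_size:]

assert k_weight.shape == v_weight.shape == (hidden_size, hidden_size)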
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : List[Any] = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
a__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
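# Editor's sketch: the file above wires transformers' _LazyModule so heavy
# torch-dependent submodules are only imported on first attribute access. A
# minimal version of the same idea using plain PEP 562 module-level
# __getattr__ — this would live in a package __init__.py, and the module and
# attribute names are made up:
import importlib

_import_structure = {"configuration_jukebox": ["JukeboxConfig"]}


def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            submodule = importlib.import_module("." + module_name, __name__)
            return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")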
| 309 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class A__ ( A__ ):
A__ = 42
class A__ ( A__ , A__ ):
@register_to_config
def __init__( self : Optional[int] , _a : int = 16 , _a : int = 88 , _a : Optional[int] = None , _a : Optional[int] = None , _a : int = 1 , _a : float = 0.0 , _a : int = 32 , _a : Optional[int] = None , _a : bool = False , _a : Optional[int] = None , _a : str = "geglu" , _a : bool = True , _a : bool = True , ) -> int:
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =attention_head_dim
_SCREAMING_SNAKE_CASE =num_attention_heads * attention_head_dim
_SCREAMING_SNAKE_CASE =in_channels
_SCREAMING_SNAKE_CASE =torch.nn.GroupNorm(num_groups=_a , num_channels=_a , eps=1e-6 , affine=_a )
_SCREAMING_SNAKE_CASE =nn.Linear(_a , _a )
# 3. Define transformers blocks
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , cross_attention_dim=_a , activation_fn=_a , attention_bias=_a , double_self_attention=_a , norm_elementwise_affine=_a , )
for d in range(_a )
] )
_SCREAMING_SNAKE_CASE =nn.Linear(_a , _a )
def A ( self : Union[str, Any] , _a : Optional[int] , _a : Any=None , _a : Tuple=None , _a : Tuple=None , _a : Dict=1 , _a : Optional[Any]=None , _a : bool = True , ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =hidden_states.shape
_SCREAMING_SNAKE_CASE =batch_frames // num_frames
_SCREAMING_SNAKE_CASE =hidden_states
_SCREAMING_SNAKE_CASE =hidden_states[None, :].reshape(_a , _a , _a , _a , _a )
_SCREAMING_SNAKE_CASE =hidden_states.permute(0 , 2 , 1 , 3 , 4 )
_SCREAMING_SNAKE_CASE =self.norm(_a )
_SCREAMING_SNAKE_CASE =hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _a , _a )
_SCREAMING_SNAKE_CASE =self.proj_in(_a )
# 2. Blocks
for block in self.transformer_blocks:
_SCREAMING_SNAKE_CASE =block(
_a , encoder_hidden_states=_a , timestep=_a , cross_attention_kwargs=_a , class_labels=_a , )
# 3. Output
_SCREAMING_SNAKE_CASE =self.proj_out(_a )
_SCREAMING_SNAKE_CASE =(
hidden_states[None, None, :]
.reshape(_a , _a , _a , _a , _a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
_SCREAMING_SNAKE_CASE =hidden_states.reshape(_a , _a , _a , _a )
_SCREAMING_SNAKE_CASE =hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_a )
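# Editor's sketch: the forward pass above folds the frame axis into the batch
# so that each spatial position attends over its frames. A standalone
# demonstration of that reshape round-trip (all sizes are arbitrary):
import torch

batch, frames, channels, height, width = 2, 4, 8, 3, 3
hidden = torch.randn(batch * frames, channels, height, width)

# (B*F, C, H, W) -> (B*H*W, F, C): one token per frame at each spatial position
folded = (
    hidden.reshape(batch, frames, channels, height, width)
    .permute(0, 3, 4, 1, 2)
    .reshape(batch * height * width, frames, channels)
)

# ...temporal attention would run here over the frame axis...

restored = (
    folded.reshape(batch, height, width, frames, channels)
    .permute(0, 3, 4, 1, 2)
    .reshape(batch * frames, channels, height, width)
)
assert torch.equal(hidden, restored)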
| 405 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
lowerCamelCase : Union[str, Any] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCamelCase : str = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCamelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
lowerCamelCase : int = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class A__ ( pl.LightningModule ):
def __init__( self : Optional[Any] , _a : argparse.Namespace , _a : Any=None , _a : int="base" , _a : Dict=None , _a : Tuple=None , _a : Any=None , **_a : List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_a )
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =Path(self.hparams.output_dir )
_SCREAMING_SNAKE_CASE =self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_a , **_a , )
else:
_SCREAMING_SNAKE_CASE =config
_SCREAMING_SNAKE_CASE =('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _a , _a ):
assert hasattr(self.config , _a ), f"model config doesn't have a `{p}` attribute"
setattr(self.config , _a , getattr(self.hparams , _a ) )
if tokenizer is None:
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_a , )
else:
_SCREAMING_SNAKE_CASE =tokenizer
_SCREAMING_SNAKE_CASE =MODEL_MODES[mode]
if model is None:
_SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_a , )
else:
_SCREAMING_SNAKE_CASE =model
def A ( self : List[Any] , *_a : Optional[int] , **_a : int ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(*_a , **_a )
def A ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =arg_to_scheduler[self.hparams.lr_scheduler]
_SCREAMING_SNAKE_CASE =get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_SCREAMING_SNAKE_CASE ={'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def A ( self : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model
_SCREAMING_SNAKE_CASE =['bias', 'LayerNorm.weight']
_SCREAMING_SNAKE_CASE =[
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
_SCREAMING_SNAKE_CASE =Adafactor(
_a , lr=self.hparams.learning_rate , scale_parameter=_a , relative_step=_a )
else:
_SCREAMING_SNAKE_CASE =AdamW(
_a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_SCREAMING_SNAKE_CASE =optimizer
_SCREAMING_SNAKE_CASE =self.get_lr_scheduler()
return [optimizer], [scheduler]
def A ( self : Tuple , _a : Dict , _a : List[str] ) -> str:
'''simple docstring'''
return self.validation_step(_a , _a )
def A ( self : Dict , _a : str ) -> Dict:
'''simple docstring'''
return self.validation_end(_a )
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_SCREAMING_SNAKE_CASE =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A ( self : int , _a : Any ) -> Union[str, Any]:
'''simple docstring'''
if stage == "test":
_SCREAMING_SNAKE_CASE =len(self.test_dataloader().dataset )
else:
_SCREAMING_SNAKE_CASE =self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_a )
_SCREAMING_SNAKE_CASE =len(self.train_dataloader().dataset )
def A ( self : Union[str, Any] , _a : str , _a : int , _a : bool = False ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def A ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.train_loader
def A ( self : str ) -> str:
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_a )
def A ( self : Tuple ) -> Any:
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_a )
def A ( self : int , _a : Dict ) -> Optional[Any]:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_a , list(filter(_a , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A ( self : Optional[Any] , _a : Dict[str, Any] ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.output_dir.joinpath('best_tfmr' )
_SCREAMING_SNAKE_CASE =self.step_count
self.model.save_pretrained(_a )
self.tokenizer.save_pretrained(_a )
@staticmethod
def A ( _a : Any , _a : Any ) -> List[str]:
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=_a , type=_a , required=_a , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_a , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_a , type=_a , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_a ).parent / 'test_run' / 'cache' ) , type=_a , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_a , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_a , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_a , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_a , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=_a , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_a , metavar=_a , type=_a , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_a , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_a , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_a , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_a )
parser.add_argument('--train_batch_size' , default=32 , type=_a )
parser.add_argument('--eval_batch_size' , default=32 , type=_a )
parser.add_argument('--adafactor' , action='store_true' )
class A__ ( pl.Callback ):
def A ( self : Tuple , _a : str , _a : int ) -> Dict:
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class A__ ( pl.Callback ):
def A ( self : Tuple , _a : str , _a : Tuple ) -> Tuple:
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_a )
class A__ ( pl.Callback ):
def A ( self : List[str] , _a : Tuple , _a : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =trainer.lr_schedulers[0]['scheduler']
_SCREAMING_SNAKE_CASE ={f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_a )
def A ( self : List[Any] , _a : pl.Trainer , _a : pl.LightningModule ) -> Optional[int]:
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
_SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log results
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_a , str(metrics[key] ) ) )
def A ( self : Optional[Any] , _a : pl.Trainer , _a : pl.LightningModule ) -> List[str]:
'''simple docstring'''
rank_zero_info('***** Test results *****' )
_SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log and save results to file
_SCREAMING_SNAKE_CASE =os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_a , 'w' ) as writer:
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_a , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_a , str(metrics[key] ) ) )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> None:
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(_UpperCamelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=_UpperCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_UpperCamelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_UpperCamelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_UpperCamelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_UpperCamelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=_UpperCamelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(_UpperCamelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=_UpperCamelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def _lowerCAmelCase ( _UpperCamelCase : BaseTransformer , _UpperCamelCase : argparse.Namespace , _UpperCamelCase : Any=None , _UpperCamelCase : str=True , _UpperCamelCase : Any=[] , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str=None , **_UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
_SCREAMING_SNAKE_CASE =Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_UpperCamelCase )
# add custom checkpoints
if checkpoint_callback is None:
_SCREAMING_SNAKE_CASE =pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_UpperCamelCase )
if logging_callback is None:
_SCREAMING_SNAKE_CASE =LoggingCallback()
_SCREAMING_SNAKE_CASE ={}
if args.fpaa:
_SCREAMING_SNAKE_CASE =16
if args.gpus > 1:
_SCREAMING_SNAKE_CASE ='auto'
_SCREAMING_SNAKE_CASE ='ddp'
_SCREAMING_SNAKE_CASE =args.accumulate_grad_batches
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE ='auto'
_SCREAMING_SNAKE_CASE =pl.Trainer.from_argparse_args(
_UpperCamelCase , weights_summary=_UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **_UpperCamelCase , )
if args.do_train:
trainer.fit(_UpperCamelCase )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
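# Editor's sketch: configure_optimizers above excludes biases and LayerNorm
# weights from weight decay via substring matching on parameter names. The
# same grouping in isolation; the tiny module (with an attribute deliberately
# named LayerNorm, as in BERT-style models) exists only for illustration:
import torch
from torch import nn


class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)


model = TinyBlock()
no_decay = ("bias", "LayerNorm.weight")
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)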
| 405 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=UpperCamelCase__ , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=UpperCamelCase__ , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=UpperCamelCase__ , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=UpperCamelCase__ , default=0 , help='cuda_id.' , )
__lowerCamelCase = parser.parse_args()
return args
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict:
"""simple docstring"""
if not len(UpperCamelCase__ ) == rows * cols:
        raise ValueError('The specified number of rows and columns does not match the number of images.' )
__lowerCamelCase , __lowerCamelCase = imgs[0].size
__lowerCamelCase = Image.new('RGB' , size=(cols * w, rows * h) )
__lowerCamelCase , __lowerCamelCase = grid.size
for i, img in enumerate(UpperCamelCase__ ):
grid.paste(UpperCamelCase__ , box=(i % cols * w, i // cols * h) )
return grid
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple="robotic cat with wings" , UpperCamelCase__ : Union[str, Any]=7.5 , UpperCamelCase__ : Optional[Any]=50 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : List[str]=42 , ) -> Any:
"""simple docstring"""
__lowerCamelCase = torch.Generator(pipeline.device ).manual_seed(UpperCamelCase__ )
__lowerCamelCase = pipeline(
UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , ).images
__lowerCamelCase = int(math.sqrt(UpperCamelCase__ ) )
__lowerCamelCase = image_grid(UpperCamelCase__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__A = parse_args()
# Load models and create wrapper for stable diffusion
__A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
__A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
__A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
__A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
__A = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__A = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
__A = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
__A = unet.to(torch.device("cuda", args.cuda_id))
__A = pipeline.to(unet.device)
__A , __A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
__A = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
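# Editor's sketch: image_grid above tiles PIL images row-major onto a single
# canvas. The same helper in standalone form, exercised with solid-color
# placeholder images instead of pipeline outputs:
from PIL import Image


def image_grid(imgs, rows, cols):
    if len(imgs) != rows * cols:
        raise ValueError("The specified number of rows and columns does not match the number of images.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


tiles = [Image.new("RGB", (32, 32), color) for color in ("red", "green", "blue", "white")]
image_grid(tiles, rows=2, cols=2).save("grid_demo.png")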
| 710 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__A = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
__A = dataset.iloc[:, 1:2].values
__A = dataset.iloc[:, 2].values
__A , __A , __A , __A = train_test_split(X, y, test_size=0.2, random_state=0)
__A = PolynomialFeatures(degree=4)
__A = poly_reg.fit_transform(X)
__A = LinearRegression()
pol_reg.fit(X_poly, y)
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
    plt.title('Truth or Bluff (Polynomial Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
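# Editor's sketch: PolynomialFeatures(degree=4) above expands the single input
# feature x into [1, x, x^2, x^3, x^4] before the ordinary linear fit. A quick
# check of that expansion:
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

x = np.array([[2.0], [3.0]])
print(PolynomialFeatures(degree=4).fit_transform(x))
# [[ 1.  2.  4.  8. 16.]
#  [ 1.  3.  9. 27. 81.]]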
| 167 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( _a ):
_A : List[Any] = '''M-CLIP'''
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]=1_024 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=768 ,**SCREAMING_SNAKE_CASE__ : Tuple ):
SCREAMING_SNAKE_CASE:Optional[Any] = transformerDimSize
SCREAMING_SNAKE_CASE:Optional[Any] = imageDimSize
super().__init__(**SCREAMING_SNAKE_CASE__ )
class _snake_case ( _a ):
_A : Optional[int] = MCLIPConfig
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : int ):
super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = XLMRobertaModel(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:Optional[Any] = self.transformer(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )[0]
SCREAMING_SNAKE_CASE:List[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(SCREAMING_SNAKE_CASE__ ), embs
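# Editor's sketch: the forward pass above mean-pools token embeddings with the
# attention mask so that padding positions contribute nothing before the linear
# projection. The same pooling in isolation, on random tensors of made-up sizes:
import torch

embs = torch.randn(2, 5, 8)  # (batch, seq_len, dim)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])

pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)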
| 143 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = GPTSwaTokenizer
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "This is a test"
lowerCamelCase_ = "This is a test"
return input_text, output_text
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = "<s>"
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(UpperCamelCase ) , 2000 )
def snake_case ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
# fmt: off
self.assertListEqual(
UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."]
lowerCamelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
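# Editor's sketch: the expected tokens above include byte-fallback pieces such
# as <0xC3> <0xA9>, i.e. the two UTF-8 bytes of "é" emitted when a character is
# absent from the SentencePiece vocabulary. The byte decomposition itself:
byte_tokens = [f"<0x{b:02X}>" for b in "é".encode("utf-8")]
print(byte_tokens)  # ['<0xC3>', '<0xA9>']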
| 675 | 0 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[Any] , SCREAMING_SNAKE_CASE: str ):
"""simple docstring"""
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def __snake_case ( SCREAMING_SNAKE_CASE: Tuple , SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
_lowerCAmelCase = [[float('inf' ) for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase )]
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
_lowerCAmelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_lowerCAmelCase ):
# looping through rows of graph array
for i in range(_lowerCAmelCase ):
# looping through columns of graph array
for j in range(_lowerCAmelCase ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_lowerCAmelCase = dist[i][k] + dist[k][j]
_print_dist(_lowerCAmelCase , _lowerCAmelCase )
return dist, v
if __name__ == "__main__":
_snake_case = int(input('''Enter number of vertices: '''))
_snake_case = int(input('''Enter number of edges: '''))
_snake_case = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
_snake_case = 0.0
# src and dst are 0-based indices that must be within the v x v graph
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
_snake_case = int(input('''Enter source:'''))
_snake_case = int(input('''Enter destination:'''))
_snake_case = float(input('''Enter weight:'''))
_snake_case = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
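# Editor's sketch: a non-interactive, self-contained run of the same relaxation
# on the 3-vertex example from the comments above (the interactive driver uses
# src/dst directly as indices, so edge 1->2 with weight 2 sits at dist[1][2]):
INF = float("inf")
dist = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
n = len(dist)
for k in range(n):
    for i in range(n):
        for j in range(n):
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]
print(dist)  # [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]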
| 716 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str = None
SCREAMING_SNAKE_CASE_: List[str] = BloomTokenizerFast
SCREAMING_SNAKE_CASE_: List[str] = BloomTokenizerFast
SCREAMING_SNAKE_CASE_: Any = True
SCREAMING_SNAKE_CASE_: List[Any] = False
SCREAMING_SNAKE_CASE_: Optional[int] = "tokenizer_file"
SCREAMING_SNAKE_CASE_: str = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
super().setUp()
_lowerCAmelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self : List[str] , **UpperCAmelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_lowerCAmelCase = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
_lowerCAmelCase = tokenizer.batch_encode_plus(UpperCAmelCase_ )['input_ids']
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Any , UpperCAmelCase_ : List[str]=6 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_lowerCAmelCase = 'This is a simple input'
_lowerCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase = ('This is a simple input', 'This is a pair')
_lowerCAmelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
_lowerCAmelCase = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Simple input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Simple input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Pair input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , )
def __lowerCamelCase ( self : str ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCAmelCase_ )
        _lowerCAmelCase = next(iter(UpperCAmelCase_ ) )['premise'] # pick one sample
_lowerCAmelCase = list(sample_data.values() )
_lowerCAmelCase = list(map(tokenizer.encode , UpperCAmelCase_ ) )
_lowerCAmelCase = [tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
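# Editor's sketch: the first test above checks that batch encoding followed by
# batch decoding recovers the input sentences. The same round trip outside the
# test harness; this needs network access to fetch the bigscience/tokenizer
# checkpoint used in setUp:
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
ids = tok.batch_encode_plus(texts)["input_ids"]
assert tok.batch_decode(ids) == texts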
| 491 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCamelCase_ = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for num in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE : List[Any] = 0
while 2 * i * i <= odd_composites[num]:
SCREAMING_SNAKE_CASE : Any = odd_composites[num] - 2 * i * i
if is_prime(__UpperCamelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__UpperCamelCase ) == n:
return list_nums
return []
def lowercase__( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 28 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # This checks that we actually called the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 1 |
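# The test above patches requests.Session.request so every call returns a 500
# response, then checks that from_pretrained still succeeds from the local cache.
# A minimal, library-agnostic sketch of the same pattern; fetch_config and its
# URL are hypothetical stand-ins, not part of any real API:
import unittest
from unittest import mock


def fetch_config(session, cache={"model": {"size": "tiny"}}):
    # Try the network first; fall back to the cached copy on any request failure.
    try:
        response = session.get("https://example.com/config.json")
        response.raise_for_status()
        return response.json()
    except Exception:
        return cache["model"]


class OfflineFallbackTest(unittest.TestCase):
    def test_falls_back_to_cache_on_500(self):
        session = mock.Mock()
        session.get.side_effect = RuntimeError("HTTP 500")
        self.assertEqual(fetch_config(session), {"size": "tiny"})
        session.get.assert_called_once()


if __name__ == "__main__":
    unittest.main()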
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[str] = None
if self.use_input_mask:
UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Any = None
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Tuple = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase : Any = [input_ids, input_mask]
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
UpperCAmelCase : int = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : str = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Optional[Any] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : int = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Optional[int] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : int = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCAmelCase : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : Dict = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[int] = False
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int = TFConvBertModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[str] = True
if hasattr(_SCREAMING_SNAKE_CASE , """use_cache""" ):
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Dict = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
UpperCAmelCase : Any = getattr(self.model_tester , """key_length""" , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , """saved_model""" , """1""" )
UpperCAmelCase : Any = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
UpperCAmelCase : Any = outputs["""encoder_hidden_states"""]
UpperCAmelCase : Optional[int] = outputs["""encoder_attentions"""]
else:
UpperCAmelCase : Optional[Any] = outputs["""hidden_states"""]
UpperCAmelCase : Dict = outputs["""attentions"""]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = True
UpperCAmelCase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
UpperCAmelCase : Tuple = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
UpperCAmelCase : int = getattr(self.model_tester , """key_length""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = getattr(self.model_tester , """key_length""" , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase : Tuple = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[int] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
UpperCAmelCase : Any = True
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
UpperCAmelCase : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : Dict = [1, 6, 768]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
| 359 |
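# The tester class above builds random model inputs with ids_tensor and
# random_attention_mask. A numpy-only sketch of what such helpers do; these
# reimplementations are assumptions, not the transformers versions:
import numpy as np

rng = np.random.default_rng(0)

def ids_tensor(shape, vocab_size):
    # Uniform random token ids in [0, vocab_size).
    return rng.integers(0, vocab_size, size=shape)

def random_attention_mask(shape):
    mask = rng.integers(0, 2, size=shape)
    mask[:, 0] = 1  # guarantee at least one attended position per row
    return mask

batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = ids_tensor((batch_size, seq_length), vocab_size)
attention_mask = random_attention_mask((batch_size, seq_length))
assert input_ids.shape == attention_mask.shape == (batch_size, seq_length)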
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price( symbol : str = "AAPL" ) -> str:
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 359 | 1 |
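# The scraper above locates a <span> inside a <div> with a specific class
# attribute. An offline sketch of the same BeautifulSoup lookup on literal HTML;
# the markup and price are made up, and real Yahoo Finance class names change often:
from bs4 import BeautifulSoup

html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>189.84</span></div>'
soup = BeautifulSoup(html, "html.parser")
price = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").text
assert price == "189.84"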
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input( ) -> Tuple:
    """simple docstring"""
    compute_environment = _ask_options(
        """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers=None ) -> Optional[Any]:
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("""config""" , description=description )
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""" , description=description )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args ) -> Any:
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(""".json""" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"accelerate configuration saved at {config_file}" )
def main( ) -> str:
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 591 |
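# config_command_parser above works both standalone and as a subcommand. A
# minimal sketch of that dual-mode argparse pattern; command and flag names here
# are illustrative only:
import argparse

def build_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description="Configure the tool")
    else:
        parser = argparse.ArgumentParser("config", description="Configure the tool")
    parser.add_argument("--config_file", default=None, help="Where to save the config.")
    return parser

args = build_parser().parse_args(["--config_file", "cfg.yaml"])
assert args.config_file == "cfg.yaml"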
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 215 | 0 |
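# The shim above re-exports moved names and emits a deprecation notice. A stdlib
# sketch of that pattern using warnings; the helper below is an assumption, not
# the diffusers implementation:
import warnings

def deprecate(name, version, message, stacklevel=2):
    warnings.warn(
        f"`{name}` is deprecated and will be removed in {version}. {message}",
        FutureWarning,
        stacklevel=stacklevel,
    )

deprecate("pipelines_utils", "0.22.0", "Import from the new location instead.")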
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__( ProcessorMixin ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ['''image_processor''', '''tokenizer''']
UpperCAmelCase_ : Tuple = '''BridgeTowerImageProcessor'''
UpperCAmelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , image_processor , tokenizer):
        """simple docstring"""
        super().__init__(image_processor , tokenizer)
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# add pixel_values + pixel_mask
lowerCAmelCase = self.image_processor(
__lowerCAmelCase , return_tensors=__lowerCAmelCase , do_normalize=__lowerCAmelCase , do_center_crop=__lowerCAmelCase , **__lowerCAmelCase)
encoding.update(__lowerCAmelCase)
return encoding
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
@property
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
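# A dict-level sketch of the two tricks the processor above relies on: merging
# the tokenizer encoding with the image-processor encoding via update(), and
# deduplicating model_input_names order-preservingly with dict.fromkeys. Values
# below are toy placeholders:
text_encoding = {"input_ids": [[0, 31414, 2]], "attention_mask": [[1, 1, 1]]}
image_encoding = {"pixel_values": [[[0.5]]], "pixel_mask": [[1]]}
encoding = dict(text_encoding)
encoding.update(image_encoding)
assert set(encoding) == {"input_ids", "attention_mask", "pixel_values", "pixel_mask"}

names = list(dict.fromkeys(["input_ids", "attention_mask"] + ["pixel_values", "attention_mask"]))
assert names == ["input_ids", "attention_mask", "pixel_values"]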
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class a__( PretrainedConfig ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''switch_transformers'''
UpperCAmelCase_ : Tuple = ['''past_key_values''']
UpperCAmelCase_ : List[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , __lowerCAmelCase=32128 , __lowerCAmelCase=768 , __lowerCAmelCase=64 , __lowerCAmelCase=2048 , __lowerCAmelCase=64 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=8 , __lowerCAmelCase=False , __lowerCAmelCase=0.01 , __lowerCAmelCase="float32" , __lowerCAmelCase=False , __lowerCAmelCase=32 , __lowerCAmelCase=128 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=0.001 , __lowerCAmelCase=0.001 , __lowerCAmelCase=1.0 , __lowerCAmelCase="relu" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split("""-""")
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == """gated"""
if len(__lowerCAmelCase) > 1 and act_info[0] != "gated" or len(__lowerCAmelCase) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = """gelu_new"""
super().__init__(
pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase , )
| 605 | 0 |
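# The config above parses feed_forward_proj strings like "gated-gelu" into an
# activation name plus a gating flag. A standalone sketch of that parsing rule:
def parse_feed_forward_proj(value):
    parts = value.split("-")
    is_gated = parts[0] == "gated"
    act_fn = parts[-1]
    if (len(parts) > 1 and not is_gated) or len(parts) > 2:
        raise ValueError(f"{value!r} is not of the form `gated-{{ACT_FN}}` or `{{ACT_FN}}`")
    return act_fn, is_gated

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu", True)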
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result( ):
    num_nodes , num_edges = 9, 1_4 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    adjancency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjancency[node_a].append([node_b, cost] )
        adjancency[node_b].append([node_a, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
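# The test above feeds an undirected, weighted edge list into Prim's algorithm.
# A self-contained Prim's sketch over the same defaultdict adjacency shape; this
# is a generic implementation, not the repo's prisms_algorithm:
import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, nbr) for nbr, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree.append((u, v, cost))
        for nbr, c in adjacency[v]:
            if nbr not in visited:
                heapq.heappush(heap, (c, v, nbr))
    return tree

adjacency = defaultdict(list)
for node_a, node_b, cost in [(0, 1, 4), (0, 7, 8), (1, 7, 11), (7, 6, 1)]:
    adjacency[node_a].append([node_b, cost])
    adjacency[node_b].append([node_a, cost])
print(prim_mst(adjacency))  # e.g. [(0, 1, 4), (0, 7, 8), (7, 6, 1)]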
| 528 |
"""simple docstring"""
def reverse_long_words( sentence: str ) -> str:
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def A ( snake_case :Optional[Any] ) -> Union[str, Any]:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , snake_case )
@pytest.fixture
def A ( snake_case :str ) -> List[Any]:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , snake_case )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , snake_case )
@pytest.fixture
def A ( snake_case :Optional[int] ) -> Dict:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , snake_case )
@pytest.fixture
def A ( snake_case :List[Any] , snake_case :Tuple ) -> Tuple:
HfFolder.save_token(snake_case )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def hf_api( ) -> List[Any]:
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='session' )
def hf_token( ) -> List[Any]:
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def A ( snake_case :str ) -> str:
def _cleanup_repo(snake_case :Union[str, Any] ):
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def A ( snake_case :List[str] ) -> Any:
@contextmanager
def _temporary_repo(snake_case :Tuple ):
try:
yield repo_id
finally:
cleanup_repo(snake_case )
return _temporary_repo
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :Dict , snake_case :Dict ) -> List[str]:
__UpperCamelCase = f'repo_txt_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data/text_data.txt' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :Optional[int] , snake_case :str , snake_case :Tuple ) -> Any:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :List[Any] , snake_case :str ) -> Optional[int]:
__UpperCamelCase = f'repo_zipped_txt_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data.zip' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :int , snake_case :Optional[int] , snake_case :Optional[int] ) -> str:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :List[str] , snake_case :Any ) -> List[Any]:
__UpperCamelCase = f'repo_zipped_img_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data.zip' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :List[str] , snake_case :Optional[Any] , snake_case :Optional[int] ) -> Optional[int]:
return hf_private_dataset_repo_zipped_img_data_
| 293 |
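# The conftest above layers session-scoped fixtures that create a repo, yield
# its id, and delete it afterwards. A minimal sketch of that create/yield/clean
# fixture shape, using a temp directory instead of the Hub:
import os
import shutil
import tempfile

import pytest

@pytest.fixture(scope="session")
def scratch_repo():
    path = tempfile.mkdtemp(prefix="repo_txt_data-")
    try:
        yield path  # tests run while the resource exists
    finally:
        shutil.rmtree(path, ignore_errors=True)

def test_scratch_repo_exists(scratch_repo):
    assert os.path.isdir(scratch_repo)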
"""simple docstring"""
def nor_gate( input_a : int , input_b : int ) -> int:
    return int(input_a == input_b == 0 )
def main( ) -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 293 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one ( i ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input( ):
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def test_parallel_backend_map_nested( num_proc ):
    s1 = [1, 2]
    s2 = {'''a''': 1, '''b''': 2}
    s3 = {'''a''': [1, 2], '''b''': [3, 4]}
    s4 = {'''a''': {'''1''': 1}, '''b''': 2}
    s5 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'''a''': 2, '''b''': 3}
    expected_map_nested_s3 = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_s4 = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_s5 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
| 407 |
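# map_nested above applies a function through lists and dicts recursively. A
# small stdlib sketch of that traversal; generic, not the datasets implementation:
def map_nested_simple(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_simple(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_simple(fn, v) for v in data)
    return fn(data)

assert map_nested_simple(lambda i: i + 1, {"a": [1, 2], "b": {"1": 1}}) == {"a": [2, 3], "b": {"1": 2}}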
def combination_sum_iv( n: int , array: list , target: int ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations( target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n: int , array: list , target: int ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list , target: int ) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 322 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__lowerCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__lowerCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__lowerCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
__lowerCAmelCase = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
__lowerCAmelCase = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
__lowerCAmelCase = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
__lowerCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__lowerCAmelCase = np.expand_dims(test_image, axis=0)
__lowerCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__lowerCAmelCase = """Normal"""
if result[0][0] == 1:
__lowerCAmelCase = """Abnormality detected"""
| 708 |
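# The script above stacks Conv/Pool/Flatten/Dense layers for binary image
# classification. A compact sketch of the same architecture as a function;
# assumes TensorFlow is installed, with sizes mirroring the snippet:
import tensorflow as tf
from tensorflow.keras import layers, models

def build_classifier(input_shape=(64, 64, 3)):
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(32, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model

build_classifier().summary()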
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( ProcessorMixin ):
"""simple docstring"""
__UpperCAmelCase : int = '''AutoTokenizer'''
__UpperCAmelCase : Optional[Any] = ['''tokenizer''']
__UpperCAmelCase : str = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Dict=None ):
'''simple docstring'''
super().__init__(_a )
_a : List[str] = speaker_embeddings
@classmethod
def __lowercase ( cls : Any ,_a : Optional[int] ,_a : Union[str, Any]="speaker_embeddings_path.json" ,**_a : Union[str, Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_a : Tuple = get_file_from_repo(
_a ,_a ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_a ,_a )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : List[Any] = None
else:
with open(_a ) as speaker_embeddings_json:
_a : List[str] = json.load(_a )
else:
_a : str = None
_a : Any = AutoTokenizer.from_pretrained(_a ,**_a )
return cls(tokenizer=_a ,speaker_embeddings=_a )
def __lowercase ( self : List[str] ,_a : Tuple ,_a : Any="speaker_embeddings_path.json" ,_a : Optional[int]="speaker_embeddings" ,_a : bool = False ,**_a : Optional[int] ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_a ,_a ,'v2' ) ,exist_ok=_a )
_a : Optional[Any] = {}
_a : List[str] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : Any = self._load_voice_preset(_a )
_a : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,_a ,F"""{prompt_key}_{key}""" ) ,voice_preset[key] ,allow_pickle=_a ,)
_a : Dict = os.path.join(_a ,F"""{prompt_key}_{key}.npy""" )
_a : Any = tmp_dict
with open(os.path.join(_a ,_a ) ,'w' ) as fp:
json.dump(_a ,_a )
super().save_pretrained(_a ,_a ,**_a )
def __lowercase ( self : Tuple ,_a : str = None ,**_a : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.speaker_embeddings[voice_preset]
_a : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : List[Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_a : Tuple = np.load(_a )
return voice_preset_dict
def __lowercase ( self : List[Any] ,_a : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Any ,_a : List[str]=None ,_a : Tuple=None ,_a : Tuple="pt" ,_a : Any=256 ,_a : Optional[Any]=False ,_a : List[str]=True ,_a : Optional[Any]=False ,**_a : Dict ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(_a ,_a ):
if (
isinstance(_a ,_a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : Union[str, Any] = self._load_voice_preset(_a )
else:
if isinstance(_a ,_a ) and not voice_preset.endswith('.npz' ):
_a : str = voice_preset + '.npz'
_a : Optional[int] = np.load(_a )
if voice_preset is not None:
self._validate_voice_preset_dict(_a ,**_a )
_a : List[str] = BatchFeature(data=_a ,tensor_type=_a )
_a : List[Any] = self.tokenizer(
_a ,return_tensors=_a ,padding='max_length' ,max_length=_a ,return_attention_mask=_a ,return_token_type_ids=_a ,add_special_tokens=_a ,**_a ,)
if voice_preset is not None:
_a : Dict = voice_preset
return encoded_text
| 319 | 0 |
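# The processor above validates that each voice-preset key maps to an ndarray of
# a fixed rank before use. A standalone sketch of that validation step, with the
# expected ranks copied from the class attribute above:
import numpy as np

PRESET_SHAPE = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}

def validate_voice_preset(preset):
    for key, ndim in PRESET_SHAPE.items():
        if key not in preset:
            raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
        if not isinstance(preset[key], np.ndarray) or preset[key].ndim != ndim:
            raise ValueError(f"{key} voice preset must be a {ndim}D ndarray.")

validate_voice_preset({
    "semantic_prompt": np.zeros(4),
    "coarse_prompt": np.zeros((2, 4)),
    "fine_prompt": np.zeros((2, 4)),
})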
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase : Optional[int] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__UpperCamelCase : str = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__UpperCamelCase : Union[str, Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
    def _compute( self , predictions , references , sample_weight=None ):
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
| 4 |
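# The metric wrapper above delegates to sklearn's matthews_corrcoef; calling it
# directly reproduces the first docstring example (assumes scikit-learn installed):
from sklearn.metrics import matthews_corrcoef

references = [1, 3, 2, 0, 3, 2]
predictions = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(references, predictions), 2))  # 0.54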
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ):
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'Total count for various states are: {counts}')
| 204 | 0 |
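# The circuit above flips both qubits with X gates, so every shot should measure
# "11". A dependency-free sanity check of that expectation; plain Python standing
# in for the simulator, not a qiskit API:
def apply_not_gates(bits):
    return tuple(1 - b for b in bits)

counts = {}
for _ in range(1000):
    state = apply_not_gates((0, 0))
    key = "".join(str(b) for b in state)
    counts[key] = counts.get(key, 0) + 1

assert counts == {"11": 1000}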
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class _A ( PretrainedConfig ):
lowercase_ : Tuple = '''bart'''
lowercase_ : Dict = ['''past_key_values''']
lowercase_ : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowerCamelCase__ : Union[str, Any]=5_02_65 , lowerCamelCase__ : List[str]=10_24 , lowerCamelCase__ : str=12 , lowerCamelCase__ : Any=40_96 , lowerCamelCase__ : Union[str, Any]=16 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : str=40_96 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : List[str]=0.0 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : str=10_24 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Union[str, Any]=1 , lowerCamelCase__ : Any=0 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : str=True , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : int=2 , **lowerCamelCase__ : Dict , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = vocab_size
__UpperCamelCase : Optional[int] = max_position_embeddings
__UpperCamelCase : List[Any] = d_model
__UpperCamelCase : int = encoder_ffn_dim
__UpperCamelCase : Tuple = encoder_layers
__UpperCamelCase : List[str] = encoder_attention_heads
__UpperCamelCase : int = decoder_ffn_dim
__UpperCamelCase : str = decoder_layers
__UpperCamelCase : Any = decoder_attention_heads
__UpperCamelCase : Tuple = dropout
__UpperCamelCase : List[str] = attention_dropout
__UpperCamelCase : Optional[Any] = activation_dropout
__UpperCamelCase : Optional[int] = activation_function
__UpperCamelCase : str = init_std
__UpperCamelCase : List[Any] = encoder_layerdrop
__UpperCamelCase : int = decoder_layerdrop
__UpperCamelCase : Dict = classifier_dropout
__UpperCamelCase : Optional[int] = use_cache
__UpperCamelCase : str = encoder_layers
__UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _A ):
__UpperCamelCase : Tuple = self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"""The config can simply be saved and uploaded again to be fixed.""" )
class _A ( OnnxSeq2SeqConfigWithPast ):
@property
def a ( self : Optional[int] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__UpperCamelCase : int = {0: 'batch'}
__UpperCamelCase : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
__UpperCamelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCamelCase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__UpperCamelCase : Optional[Any] = self.num_layers
for i in range(_A ):
__UpperCamelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
__UpperCamelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__UpperCamelCase : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def a ( self : Optional[Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Optional[Any] = super().outputs
else:
__UpperCamelCase : Dict = super(_A , self ).outputs
if self.use_past:
__UpperCamelCase : Any = self.num_layers
for i in range(_A ):
__UpperCamelCase : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'}
__UpperCamelCase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def a ( self : str , lowerCamelCase__ : int , lowerCamelCase__ : Any = -1 , lowerCamelCase__ : Tuple = -1 , lowerCamelCase__ : List[Any] = False , lowerCamelCase__ : Dict = None , ):
"""simple docstring"""
__UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__UpperCamelCase : Optional[int] = seq_length if not self.use_past else 1
__UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
__UpperCamelCase : List[Any] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase : Optional[Any] = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCamelCase : Optional[int] = common_inputs['input_ids'].shape
__UpperCamelCase : Any = common_inputs['decoder_input_ids'].shape[1]
__UpperCamelCase : str = self.num_attention_heads
__UpperCamelCase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : Optional[Any] = decoder_seq_length + 3
__UpperCamelCase : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_A , _A )] , dim=1 )
__UpperCamelCase : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase : Dict = self.num_layers
__UpperCamelCase : int = min(_A , _A )
__UpperCamelCase : Union[str, Any] = max(_A , _A ) - min_num_layers
__UpperCamelCase : Tuple = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__UpperCamelCase : List[str] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def a ( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any = -1 , lowerCamelCase__ : int = -1 , lowerCamelCase__ : Dict = False , lowerCamelCase__ : Union[str, Any] = None , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCamelCase : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__UpperCamelCase : Dict = seqlen + 2
__UpperCamelCase : str = self.num_layers
__UpperCamelCase : str = self.num_attention_heads
__UpperCamelCase : Dict = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : Any = common_inputs['attention_mask'].dtype
__UpperCamelCase : Optional[int] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__UpperCamelCase : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def a ( self : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int = -1 , lowerCamelCase__ : Dict = -1 , lowerCamelCase__ : int = False , lowerCamelCase__ : List[str] = None , ):
"""simple docstring"""
__UpperCamelCase : List[Any] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase : List[str] = tokenizer.num_special_tokens_to_add(_A )
__UpperCamelCase : Union[str, Any] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase : Any = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__UpperCamelCase : Dict = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def a ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict = -1 , lowerCamelCase__ : str = -1 , lowerCamelCase__ : str = False , lowerCamelCase__ : Optional[Any] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
elif self.task == "causal-lm":
__UpperCamelCase : Tuple = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def a ( self : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Dict = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__UpperCamelCase : Optional[Any] = super(_A , self )._flatten_past_key_values_(
_A , _A , _A , _A )
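# Hedged usage sketch for an encoder-decoder ONNX config like the one above. It assumes
# the legacy `transformers.onnx.export` API and a BART-style checkpoint; the config
# class name `BartOnnxConfig` and the output path are illustrative, not taken from here.
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#     from transformers.onnx import export
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")
#     onnx_config = BartOnnxConfig(model.config, task="seq2seq-lm")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bart.onnx"))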
| 711 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(UpperCAmelCase_ )
class _A ( UpperCAmelCase_ ):
lowercase_ : Tuple = '''rag'''
lowercase_ : Tuple = True
def __init__( self : str , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=" / " , lowerCamelCase__ : Optional[int]=" // " , lowerCamelCase__ : int=5 , lowerCamelCase__ : int=3_00 , lowerCamelCase__ : Optional[int]=7_68 , lowerCamelCase__ : str=8 , lowerCamelCase__ : Any="wiki_dpr" , lowerCamelCase__ : Optional[Any]="train" , lowerCamelCase__ : Optional[int]="compressed" , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : str=False , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Dict , ):
"""simple docstring"""
super().__init__(
bos_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , forced_eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , prefix=lowerCamelCase__ , vocab_size=lowerCamelCase__ , **lowerCamelCase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase : Any = kwargs.pop("""question_encoder""" )
__UpperCamelCase : List[Any] = question_encoder_config.pop("""model_type""" )
__UpperCamelCase : Union[str, Any] = kwargs.pop("""generator""" )
__UpperCamelCase : List[Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase : Tuple = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[str] = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[str] = reduce_loss
__UpperCamelCase : List[str] = label_smoothing
__UpperCamelCase : Union[str, Any] = exclude_bos_score
__UpperCamelCase : Tuple = do_marginalize
__UpperCamelCase : int = title_sep
__UpperCamelCase : Any = doc_sep
__UpperCamelCase : str = n_docs
__UpperCamelCase : Optional[int] = max_combined_length
__UpperCamelCase : Any = dataset
__UpperCamelCase : Tuple = dataset_split
__UpperCamelCase : List[Any] = index_name
__UpperCamelCase : List[str] = retrieval_vector_size
__UpperCamelCase : str = retrieval_batch_size
__UpperCamelCase : Optional[Any] = passages_path
__UpperCamelCase : Tuple = index_path
__UpperCamelCase : Dict = use_dummy_dataset
__UpperCamelCase : List[Any] = output_retrieved
__UpperCamelCase : Optional[int] = do_deduplication
__UpperCamelCase : int = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase : Union[str, Any] = getattr(self.generator , """forced_eos_token_id""" , lowerCamelCase__ )
@classmethod
def a ( cls : Any , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : PretrainedConfig , **lowerCamelCase__ : List[str] ):
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCamelCase__ )
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : int = copy.deepcopy(self.__dict__ )
__UpperCamelCase : List[str] = self.question_encoder.to_dict()
__UpperCamelCase : int = self.generator.to_dict()
__UpperCamelCase : Dict = self.__class__.model_type
return output
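# Hedged usage sketch for the composite config above: RAG pairs a question-encoder
# config with a generator config, and the classmethod defined above builds the combined
# config from the two parts. The concrete sub-configs (DPR encoder + BART generator)
# are the usual pairing but are an assumption here, not read from this file.
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5, max_combined_length=300
#     )
#     assert rag_config.generator.model_type == "bart"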
| 515 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =BlipImageProcessor()
UpperCAmelCase_ =GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
UpperCAmelCase_ =BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
UpperCAmelCase_ =InstructBlipProcessor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: str , **_lowerCAmelCase: str ) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer
def lowerCAmelCase__ ( self: Tuple , **_lowerCAmelCase: Any ) -> Tuple:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor
def lowerCAmelCase__ ( self: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).qformer_tokenizer
def lowerCAmelCase__ ( self: List[str] ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ =[Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
UpperCAmelCase_ =InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_image_processor()
UpperCAmelCase_ =self.get_tokenizer()
UpperCAmelCase_ =self.get_qformer_tokenizer()
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =self.prepare_image_inputs()
UpperCAmelCase_ =image_processor(_lowerCAmelCase , return_tensors="np" )
UpperCAmelCase_ =processor(images=_lowerCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_image_processor()
UpperCAmelCase_ =self.get_tokenizer()
UpperCAmelCase_ =self.get_qformer_tokenizer()
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
UpperCAmelCase_ ="lower newer"
UpperCAmelCase_ =processor(text=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
UpperCAmelCase_ =qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.get_image_processor()
UpperCAmelCase_ =self.get_tokenizer()
UpperCAmelCase_ =self.get_qformer_tokenizer()
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
UpperCAmelCase_ ="lower newer"
UpperCAmelCase_ =self.prepare_image_inputs()
UpperCAmelCase_ =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_image_processor()
UpperCAmelCase_ =self.get_tokenizer()
UpperCAmelCase_ =self.get_qformer_tokenizer()
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ =processor.batch_decode(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.get_image_processor()
UpperCAmelCase_ =self.get_tokenizer()
UpperCAmelCase_ =self.get_qformer_tokenizer()
UpperCAmelCase_ =InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
UpperCAmelCase_ ="lower newer"
UpperCAmelCase_ =self.prepare_image_inputs()
UpperCAmelCase_ =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
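# Minimal sketch of what these tests exercise (checkpoint name illustrative): the
# processor fans one call out to the main tokenizer, the Q-Former tokenizer and the
# image processor, so a single call yields all five tensors checked above.
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(text="lower newer", images=image, return_tensors="pt")
#     # keys: input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values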
| 54 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =test_results.split(" " )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCAmelCase_ =expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
UpperCAmelCase_ =None
UpperCAmelCase_ =False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , lowercase__ ):
UpperCAmelCase_ =True
UpperCAmelCase_ =line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
UpperCAmelCase_ =line
UpperCAmelCase_ =False
return failures
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =title
UpperCAmelCase_ =doc_test_results["time_spent"].split("," )[0]
UpperCAmelCase_ =doc_test_results["success"]
UpperCAmelCase_ =doc_test_results["failures"]
UpperCAmelCase_ =self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCAmelCase_ =doc_test_results
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self._time_spent]
UpperCAmelCase_ =0
for time in time_spent:
UpperCAmelCase_ =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCAmelCase ) == 1:
UpperCAmelCase_ =[0, 0, time_parts[0]]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =40
UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
UpperCAmelCase_ =""
for category, failures in category_failures.items():
if len(_lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
UpperCAmelCase_ =F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
UpperCAmelCase_ =client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =""
for key, value in failures.items():
UpperCAmelCase_ =value[:200] + " [Truncated]" if len(_lowerCAmelCase ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
UpperCAmelCase_ =job_name
UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
UpperCAmelCase_ ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda _lowerCAmelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
UpperCAmelCase_ =job_result["failures"]
UpperCAmelCase_ =self.get_reply_blocks(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text=_lowerCAmelCase )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =os.environ["GITHUB_RUN_ID"]
UpperCAmelCase_ =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
UpperCAmelCase_ =requests.get(lowercase__ ).json()
UpperCAmelCase_ ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCAmelCase_ =math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
UpperCAmelCase_ =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase__ )
return {}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
if os.path.exists(lowercase__ ):
UpperCAmelCase_ =os.listdir(lowercase__ )
for file in files:
try:
with open(os.path.join(lowercase__ , lowercase__ ) , encoding="utf-8" ) as f:
UpperCAmelCase_ =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase__ , lowercase__ )}.' ) from e
return _artifact
def a__ ( ):
'''simple docstring'''
class A :
def __init__( self: Tuple , _lowerCAmelCase: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =name
UpperCAmelCase_ =[]
def __str__( self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.name
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
self.paths.append({"name": self.name, "path": path} )
UpperCAmelCase_ ={}
UpperCAmelCase_ =filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase_ =directory
if artifact_name not in _available_artifacts:
UpperCAmelCase_ =Artifact(lowercase__ )
_available_artifacts[artifact_name].add_path(lowercase__ )
return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
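# Worked example for the stats parser used above (pytest's summary format assumed;
# numbers illustrative). The parser splits on spaces, reads the integer immediately
# preceding each "failed"/"passed" token, and takes the run time from the tail:
#
#     failed, success, time_spent = handle_test_results("== 2 failed, 98 passed in 373.04s ==")
#     # failed == 2, success == 98, time_spent == "373.04s"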
| 54 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a : Tuple = StableUnCLIPPipeline
a : Tuple = TEXT_TO_IMAGE_PARAMS
a : Any = TEXT_TO_IMAGE_BATCH_PARAMS
a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
a : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
__lowercase = 3_2
__lowercase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowercase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowercase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowerCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
__lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ )
__lowercase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowercase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0 )
__lowercase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL()
__lowercase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : Optional[int] , A_ : Optional[Any]=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowercase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
__lowercase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
__lowercase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe("""anime turtle""" , generator=lowerCAmelCase_ , output_type="""np""" )
__lowercase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
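# Hedged inference sketch mirroring the slow test above (same checkpoint; fp16 plus
# attention slicing and sequential CPU offload keep peak memory below the ~7 GB the
# test asserts):
#
#     import torch
#     from diffusers import StableUnCLIPPipeline
#
#     pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
#     pipe.enable_attention_slicing()
#     pipe.enable_sequential_cpu_offload()
#     image = pipe("anime turtle", num_inference_steps=20, output_type="np").images[0]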
| 717 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
__lowercase = [False] * len(UpperCamelCase__ )
__lowercase = []
queue.append(UpperCamelCase__ )
__lowercase = True
while queue:
__lowercase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase__ )
__lowercase = True
__lowercase = u
return visited[t]
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__lowercase = [-1] * (len(UpperCamelCase__ ))
__lowercase = 0
while bfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
__lowercase = float("""Inf""" )
__lowercase = sink
while s != source:
# Find the minimum value in select path
__lowercase = min(UpperCamelCase__ , graph[parent[s]][s] )
__lowercase = parent[s]
max_flow += path_flow
__lowercase = sink
while v != source:
__lowercase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowercase = parent[v]
return max_flow
UpperCAmelCase__ =[
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase__ , UpperCAmelCase__ =0, 5
print(ford_fulkerson(graph, source, sink))
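# Worked check: the 6-node capacity matrix above is the classic CLRS flow network, and
# repeated BFS augmentation (Edmonds-Karp) saturates it at a maximum flow of 23, so the
# script prints 23. A trivial sanity check on a 2-node graph:
#
#     assert ford_fulkerson([[0, 7], [0, 0]], 0, 1) == 7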
| 442 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
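# Behaviour sketch for the shim above: instantiation still returns a fully functional
# image processor, but emits a FutureWarning first (the keyword shown is one of
# YolosImageProcessor's standard options):
#
#     import warnings
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         extractor = YolosFeatureExtractor(do_resize=True)
#     assert issubclass(caught[-1].category, FutureWarning)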
| 178 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ : List[str] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowercase__ : Optional[int] = 4
lowercase__ : Optional[Any] = 48
lowercase__ : int = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ : List[str] = [6, 6, 6, 6]
lowercase__ : Any = 60
lowercase__ : Tuple = [6, 6, 6, 6]
lowercase__ : Dict = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ : Tuple = 4
lowercase__ : Any = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowercase__ : str = 1
lowercase__ : Optional[int] = 1
lowercase__ : Optional[int] = 1_26
lowercase__ : Any = 7
lowercase__ : int = 255.0
lowercase__ : List[Any] = """"""
return config
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
lowercase__ : Union[str, Any] = """layernorm.weight"""
if name == "norm.bias":
lowercase__ : List[str] = """layernorm.bias"""
if "conv_first" in name:
lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowercase__ : Optional[int] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" )
lowercase__ : List[str] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
lowercase__ : str = """swin2sr.""" + name
return name
def UpperCamelCase ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase__ : str = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase__ : Any = key.split(""".""" )
lowercase__ : List[Any] = int(key_split[1] )
lowercase__ : Dict = int(key_split[4] )
lowercase__ : Optional[Any] = config.embed_dim
if "weight" in key:
lowercase__ : List[str] = val[:dim, :]
lowercase__ : List[str] = val[dim : dim * 2, :]
lowercase__ : Optional[Any] = val[-dim:, :]
else:
lowercase__ : Optional[Any] = val[:dim]
lowercase__ : List[Any] = val[dim : dim * 2]
lowercase__ : Optional[int] = val[-dim:]
pass
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : Dict = get_config(lowercase_ )
lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ )
model.eval()
lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )
lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ )
lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" )
lowercase__ : Any = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56
lowercase__ : Union[str, Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 )
if config.num_channels == 1:
lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
lowercase__ : Union[str, Any] = model(lowercase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowercase__ : Optional[Any] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : int = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
lowercase__ : str = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
lowercase__ : str = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
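# Hedged CLI sketch for the conversion script above (the script filename is
# illustrative; the flags are exactly the ones registered with argparse):
#
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64 \
#         --push_to_hub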
| 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ : Optional[int] = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowercase ( _a ):
if isinstance(_a , torch.Tensor ):
return image
elif isinstance(_a , PIL.Image.Image ):
snake_case_ : int = [image]
snake_case_ : Union[str, Any] = [trans(img.convert('''RGB''' ) ) for img in image]
snake_case_ : int = torch.stack(_a )
return image
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Tuple , lowercase_ : int , lowercase_ : Union[str, Any] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case_ : List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _snake_case ( self : Tuple , lowercase_ : Optional[int] ):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def _snake_case ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any ):
# get the original timestep using init_timestep
snake_case_ : List[Any] = min(int(num_inference_steps * strength ) , lowercase_ )
snake_case_ : Tuple = max(num_inference_steps - init_timestep , 0 )
snake_case_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict=None ):
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}" )
snake_case_ : Optional[Any] = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
snake_case_ : Dict = init_latents.shape
snake_case_ : List[Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('''add noise to latents at timestep''' , lowercase_ )
snake_case_ : Optional[Any] = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
snake_case_ : str = init_latents
return latents
@torch.no_grad()
def __call__( self : Optional[int] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowercase_ : float = 0.8 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : float = 0.0 , lowercase_ : int = 50 , lowercase_ : Optional[bool] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
self.check_inputs(lowercase_ )
# 2. Preprocess image
snake_case_ : str = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
snake_case_, snake_case_ : Any = self.get_timesteps(lowercase_ , lowercase_ , self.device )
snake_case_ : Optional[Any] = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
snake_case_ : Any = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
snake_case_ : Union[str, Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
snake_case_ : str = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case_ : Dict = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
snake_case_ : str = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
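# Hedged usage sketch for the image-to-image pipeline above: `strength` decides how far
# along the noise schedule the input image is pushed before denoising starts, so
# strength=0.8 with 50 scheduler steps denoises over the final 40 steps. The pipeline
# class name and checkpoint below are placeholders, not read from this file.
#
#     import PIL.Image
#
#     pipe = MyDDIMImg2ImgPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     init_image = PIL.Image.open("face.png")
#     image, noising_timestep = pipe(init_image, strength=0.8, num_inference_steps=50, return_dict=False)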
| 485 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __lowercase ( _a ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __lowercase ( ):
snake_case_ : List[str] = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a )
snake_case_ : List[Any] = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_a )
EnvironmentCommand.register_subcommand(_a )
TestCommand.register_subcommand(_a )
RunBeamCommand.register_subcommand(_a )
DummyDataCommand.register_subcommand(_a )
# Parse args
snake_case_, snake_case_ : Optional[Any] = parser.parse_known_args()
if not hasattr(_a , '''func''' ):
parser.print_help()
exit(1 )
snake_case_ : Optional[int] = parse_unknown_args(_a )
# Run
snake_case_ : Optional[int] = args.func(_a , **_a )
service.run()
if __name__ == "__main__":
main()
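# Hedged CLI sketch: once the console entry point is installed, the subcommands
# registered above dispatch through `args.func`, e.g.
#
#     datasets-cli env
#     datasets-cli test ./my_dataset --save_infos --all_configs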
| 485 | 1 |
"""Download "real" regularization images for a class prompt via LAION kNN retrieval."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION-400M kNN index for `class_prompt` and download up to
    `num_class_images` images, over-querying by a factor of 1.5 to absorb dead links."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidates.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
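# Hedged CLI sketch (script filename illustrative; the flags come from parse_args above):
#
#     python retrieve.py --class_prompt "a photo of a dog" \
#         --class_data_dir ./real_reg/dog --num_class_images 200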
| 627 |
"""CTRL model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a CTRL model."""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
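# Hedged usage sketch: like any PretrainedConfig subclass, the config can be built with
# overrides and handed to the matching model class; `attribute_map` above makes the
# generic names resolve to the CTRL-specific ones.
#
#     from transformers import CTRLConfig, CTRLModel
#
#     config = CTRLConfig(n_layer=12)                      # override one default
#     model = CTRLModel(config)                            # randomly initialised CTRL
#     assert model.config.hidden_size == config.n_embd     # resolved via attribute_map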
| 627 | 1 |
"""Tests for the Audio Spectrogram Transformer (AST) feature extractor."""

import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
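# Example (illustrative): floats_list((2, 3)) returns a 2x3 nested list of
# uniform floats in [0, scale); pass an explicit `rng` for reproducible draws.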
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            # Padding should downcast double-precision inputs to float32.
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
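# These tests are collected by pytest; assuming the standard transformers
# repository layout (path is an assumption), they can be run with e.g.:
#
#     pytest tests/models/audio_spectrogram_transformer -k feature_extraction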
"""Testing suite for the PyTorch MobileViT model."""

import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2. With
            # image_size=32 the five stages measure 16, 8, 4, 2, 1, so after the
            # loop divisor == 64 and divisor // 2 == 32 == output_stride.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
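# The @slow integration tests above download the "apple/mobilevit-*"
# checkpoints from the Hub, so they only run when explicitly enabled
# (test path assumed from the standard transformers layout):
#
#     RUN_SLOW=1 pytest tests/models/mobilevit/test_modeling_mobilevit.py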