"""Convert a RWKV checkpoint from BlinkDL to the Hugging Face format."""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename the keys of an RWKV state dict to match the Hugging Face implementation."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint, convert its state dict and save it in the Hugging Face format."""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file, then convert the state dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split into shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

        # 5. Clean up the shards (for some reason the files PyTorch saves take the same space as the whole state dict).
        print(
            "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
        )
        shard_files = list(shards.keys())

        del state_dict
        del shards
        gc.collect()

        for shard_file in shard_files:
            state_dict = torch.load(os.path.join(output_dir, shard_file))
            torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
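# A minimal usage sketch (not part of the original script). The repo and checkpoint
# names below are hypothetical examples of BlinkDL-style RWKV checkpoints:
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-4-169m-hf \
#       --size 169M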
import requests

giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
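# To run this test module directly (assuming a transformers development checkout,
# where this file would live under tests/models/auto/ -- the path is an assumption):
#
#   python -m pytest tests/models/auto/test_tokenization_auto.py -v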
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
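# A minimal usage sketch (the file names are hypothetical). The csv file is expected
# to contain `model`, `batch_size`, `sequence_length` and `result` columns, as produced
# by the transformers benchmarking scripts:
#
#   python plot_csv_file.py --csv_file benchmark_results.csv --figure_png_file plot.png --is_time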
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
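# Example invocation (all paths are hypothetical placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf_checkpoint \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./lxmert_pytorch_model.bin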
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place via the slowsort algorithm.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    >>> seq = [4, 3, 2, 1]; slowsort(seq); seq
    [1, 2, 3, 4]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
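# Example usage beyond the doctests above (slowsort sorts in place and returns None):
#
#   data = [5, 2, 4, 1, 3]
#   slowsort(data)
#   print(data)  # [1, 2, 3, 4, 5]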
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
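# A minimal usage sketch (the dataset column names here are hypothetical):
#
#   task = AutomaticSpeechRecognition(audio_column="audio_file", transcription_column="text")
#   # `column_mapping` tells the datasets task-preparation machinery how to rename columns:
#   assert task.column_mapping == {"audio_file": "audio", "text": "transcription"}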
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.

    Args:
        pool_scales (`Tuple[int]`): Pooling scales used in the Pooling Pyramid Module.
        in_channels (`int`): Input channels.
        channels (`int`): Output channels of each block.
        align_corners (`bool`): align_corners argument of F.interpolate.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of the FCN auxiliary head.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
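# A minimal inference sketch (assuming the checkpoint listed above is available on the Hub):
#
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   pixel_values = torch.randn(1, 3, 512, 512)
#   with torch.no_grad():
#       outputs = model(pixel_values)
#   print(outputs.logits.shape)  # (batch_size, num_labels, height, width)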
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowercase : Optional[int] = get_logger(__name__)
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Optional[int] = (
os.path.join(SCREAMING_SNAKE_CASE_ ,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case : Union[str, Any] = Extractor
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
snake_case : str = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir ,hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
snake_case : Optional[Any] = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
snake_case : int = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return output_path
class _A ( snake_case ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
...
class _A ( snake_case , snake_case ):
'''simple docstring'''
__lowerCamelCase : List[bytes] = []
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = b"" ):
'''simple docstring'''
if not magic_number:
snake_case : str = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
snake_case : Dict = cls.read_magic_number(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
class _A ( snake_case ):
'''simple docstring'''
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case : Optional[Any] = resolved(os.path.join(SCREAMING_SNAKE_CASE_ ,os.path.dirname(info.name ) ) )
return badpath(info.linkname ,base=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ ,members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : str = [B'''\x1F\x8B''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with gzip.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = b"" ):
'''simple docstring'''
if super().is_extractable(SCREAMING_SNAKE_CASE_ ,magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as fp:
snake_case : List[Any] = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case : List[Any] = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
snake_case : str = struct.unpack(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"""r""" ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
snake_case : Any = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as ifh, open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[Any] = [B'''\x42\x5A\x68''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with bza.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_ ,"""r""" ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Any = [B'''\x04\x22\x4D\x18''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
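
# A minimal usage sketch of the registry above; the archive path and output
# directory are hypothetical placeholders:
#
#   archive_format = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#   if archive_format:
#       Extractor.extract("archive.tar.gz", "extracted_dir", extractor_format=archive_format)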
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
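
# With the lazy module installed in sys.modules, a plain import such as
#
#   from transformers import RemBertConfig, RemBertModel
#
# only loads the heavy torch-backed submodule on first attribute access.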
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (given as an adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for all neighbours; a neighbour already on the recursion stack is a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
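
# Example graphs (hypothetical): in the first, 0 -> 1 -> 2 -> 0 closes a cycle;
# the second is acyclic.
#
#   check_cycle({0: [1], 1: [2], 2: [0]})  # True
#   check_cycle({0: [1], 1: [2], 2: []})   # False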
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
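# The script is meant to be launched through the `accelerate` CLI; a sketch of
# the invocation (the script filename and flag values are assumptions):
#
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16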
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build the train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
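
# The ensembling step above averages each fold's logits before the argmax; as
# a standalone sketch (`fold_logits` is a hypothetical stand-in for the list
# of gathered test predictions):
#
#   stacked = torch.stack(fold_logits, dim=0)                        # (num_folds, N, num_labels)
#   preds = stacked.sum(dim=0).div(len(fold_logits)).argmax(dim=-1)  # mean-logit vote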
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
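
# Minimal usage sketch: the defaults above should correspond to the
# google/realm-cc-news-pretrained-* checkpoints, so no arguments are needed:
#
#   config = RealmConfig()
#   config.reader_beam_size, config.num_candidates  # -> (5, 8)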
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
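
# Condensed sketch of the decision-transformer control loop exercised above
# (`env` is a hypothetical gym-style environment):
#
#   action = model(states, actions, rewards, returns_to_go, timesteps,
#                  attention_mask).action_preds[0, -1]
#   state, reward, done, _ = env.step(action)
#   returns_to_go = torch.cat(
#       [returns_to_go, (returns_to_go[0, -1] - reward).reshape(1, 1, 1)], dim=1
#   )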
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12,
        image_size=224, patch_size=16, num_channels=3, use_mask_token=False,
        use_absolute_position_embeddings=False, use_relative_position_bias=False,
        use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
        use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256,
        auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
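
# Minimal usage sketch: the defaults should correspond to a base-sized BEiT
# with 16x16 patches on 224x224 images:
#
#   config = BeitConfig()
#   config.num_hidden_layers, config.patch_size  # -> (12, 16)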
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops  # isort: skip
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generate a random hand, an opponent hand, and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate `number_of_hands` random hand/opponent/expected triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must keep returning the same result and card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 of Project Euler: count player-one wins in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
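
# Usage sketch of the PokerHand API exercised above; a hand is five
# space-separated cards, rank then suit:
#
#   PokerHand("JH AH TH KH QH").compare_with(PokerHand("2S 3H 4H 5S 6C"))  # "Win"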
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
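
# Example invocation (script name and paths are hypothetical placeholders):
#
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin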
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for i in range(len(lowerCAmelCase ) - 1 , 0 , -1 ):
UpperCAmelCase = False
for j in range(lowerCAmelCase , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase , UpperCAmelCase = unsorted[j - 1], unsorted[j]
UpperCAmelCase = True
for j in range(lowerCAmelCase ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase , UpperCAmelCase = unsorted[j + 1], unsorted[j]
UpperCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001,
        downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8,
        num_hash_buckets=16384, local_transformer_stride=128, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
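# Minimal usage sketch: CANINE operates directly on Unicode code points (no
# tokenizer vocabulary), and the defaults above should mirror google/canine-s:
#
#   config = CanineConfig()
#   config.downsampling_rate, config.num_hash_buckets  # -> (4, 16384)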
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
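
# Usage sketch (hypothetical local file and URL): record a download's
# size/checksum, then compare it against the expected mapping:
#
#   recorded = {"https://example.com/data.bin": get_size_checksum_dict("data.bin")}
#   verify_checksums(expected_checksums, recorded, verification_name="dataset source files")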
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
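
# Example invocation (script name and output directory are hypothetical
# placeholders):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny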
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(__A, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self) -> None:
        tokenizer = GPTSwaTokenizer(__A)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self) -> None:
        tokenizer = GPTSwaTokenizer(__A)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def a__ ( self : Any ) -> int:
'''simple docstring'''
lowerCamelCase__ = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
lowerCamelCase__ = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=_lowerCAmelCase , )
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter that processes one float sample at a time."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a and b coefficients; a_0 may be omitted and defaults to 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Compute y[n] = (b_0 x[n] + sum_i (b_i x[n-i] - a_i y[n-i])) / a_0."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
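# Minimal usage sketch for IIRFilter above; the coefficients are illustrative values
# (a first-order averaging filter), not taken from the source.
if __name__ == "__main__":
    lowpass = IIRFilter(order=1)
    # y[n] = 0.5 * x[n] + 0.5 * x[n-1]; a_1 = 0 makes this a pure feed-forward average.
    lowpass.set_coefficients(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])
    print([lowpass.process(sample) for sample in [0.0, 1.0, 1.0, 1.0]])
    # -> [0.0, 0.5, 1.0, 1.0]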
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def a ( self : int ) -> List[str]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def a ( self : List[Any] ) -> Tuple:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
__snake_case = MobileNetVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
__snake_case = self.num_labels
__snake_case = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def a ( self : List[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def a ( self : Tuple ) -> Dict:
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def a ( self : Any ) -> Dict:
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def a ( self : Dict ) -> Any:
pass
def a ( self : int ) -> Any:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[str]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[str]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__snake_case = outputs.hidden_states
__snake_case = 26
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def a ( self : Union[str, Any] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
@cached_property
def a ( self : List[Any] ) -> int:
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
def a ( self : Dict ) -> List[str]:
__snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__snake_case = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
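# Quick inference sketch outside the test harness (assumes the standard `transformers`
# pipeline API; the checkpoint name is taken from the integration test above):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
#   print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])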
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
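# Usage sketch (illustrative, not from the source): the attribute_map and the two
# properties above expose BERT-style names on top of the DETR-style config fields.
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8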
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, strip non-letters, and pad so no digraph repeats a letter."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably so the alphabet fits a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
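# Round-trip sketch for the cipher above (key and message are illustrative values):
if __name__ == "__main__":
    key = "MONARCHY"
    message = "HIDE THE GOLD"
    ciphertext = encode(message, key)
    print(ciphertext)
    print(decode(ciphertext, key))  # -> "HIDETHEGOLDX" (X padding added by prepare_input)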
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
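# The _LazyModule indirection above defers the heavy torch-dependent imports until an
# attribute is first accessed. A rough sketch of the idea (illustrative only; the real
# transformers._LazyModule also handles TYPE_CHECKING and module specs):
#
#   class LazyModuleSketch(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._attr_to_submodule[name]  # e.g. "modeling_clap"
#           module = importlib.import_module(f"{self.__name__}.{submodule}")
#           return getattr(module, name)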
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list``, sorted ascending.

    >>> mode([2, 3, 4, 5, 3, 4, 2])
    [2, 3, 4]
    """
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
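# Equivalent stdlib-based sketch (illustrative, not from the source): collections.Counter
# avoids the O(n^2) repeated list.count() calls used above.
#
#   from collections import Counter
#
#   def mode_counter(values: list) -> list:
#       if not values:
#           return []
#       counts = Counter(values)
#       highest = max(counts.values())
#       return sorted(v for v, c in counts.items() if c == highest)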
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(_lowercase, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int) -> None:
    """Seed python, numpy and torch RNGs so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential moving average of model parameters."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
@torch.no_grad()
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , torch.nn.Module ):
A__ = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ , )
A__ = parameters.parameters()
A__ = list(UpperCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ = self.get_decay(self.optimization_step )
A__ = decay
A__ = 1 - decay
A__ = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ = deepspeed.zero.GatheredParameters(UpperCamelCase__ , modifier_rank=UpperCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = list(UpperCamelCase__ )
for s_param, param in zip(self.shadow_params , UpperCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = [
p.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) if p.is_floating_point() else p.to(device=UpperCamelCase__ )
for p in self.shadow_params
]
def lowercase_ ( self ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = [param.detach().cpu().clone() for param in parameters]
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , UpperCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ = None
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = copy.deepcopy(UpperCamelCase__ )
A__ = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
A__ = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , UpperCamelCase__ ):
raise ValueError("Invalid min_decay" )
A__ = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , UpperCamelCase__ ):
raise ValueError("Invalid optimization_step" )
A__ = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , UpperCamelCase__ ):
raise ValueError("Invalid update_after_step" )
A__ = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , UpperCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
A__ = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
A__ = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
A__ = state_dict.get("shadow_params" , UpperCamelCase__ )
if shadow_params is not None:
A__ = shadow_params
if not isinstance(self.shadow_params , UpperCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(UpperCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 337 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , """Tatoeba directory does not exist.""" )
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        '''simple docstring'''
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        '''simple docstring'''
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng" | 337 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input: str, model, tokenizer, topk: int = 5):
    """simple docstring"""
    assert masked_input.count("""<mask>""") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = """ """.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """)):
        predicted_token = predicted_token_bpe.replace("""\u2581""", """ """)
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(""" {0}""".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
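# For orientation, a hedged sketch of what the script prints. The structure is a
# list of (filled_sentence, probability, token) triples; the tokens and scores
# below are invented placeholders, the real values depend on the checkpoint:
#   [("Le camembert est délicieux :)", 0.49, " délicieux"),
#    ("Le camembert est excellent :)", 0.10, " excellent"),
#    ("Le camembert est parfait :)", 0.03, " parfait")]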
| 255 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 5_0) -> int:
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_0_0_0_0_0_0:
            break
    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 255 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        '''simple docstring'''
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        '''simple docstring'''
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        '''simple docstring'''
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 302 |
'''simple docstring'''
import argparse
import os
import re
__lowerCAmelCase = "src/diffusers"
# Pattern that looks at the indentation in a line.
__lowerCAmelCase = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCAmelCase = re.compile(R"\[([^\]]+)\]")
def __UpperCamelCase ( lowercase_ : Any ):
"""simple docstring"""
a_ = _re_indent.search(lowercase_ )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : int="" , lowercase_ : Union[str, Any]=None , lowercase_ : str=None ):
"""simple docstring"""
a_ = 0
a_ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
a_ = ['\n'.join(lines[:index] )]
else:
a_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
a_ = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
a_ = [lines[index + 1]]
index += 1
else:
a_ = []
else:
blocks.append('\n'.join(lowercase_ ) )
a_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append('\n'.join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """simple docstring"""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner


def sort_objects(objects: list, key=None):
    """simple docstring"""
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    """simple docstring"""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([F'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """simple docstring"""
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))


def sort_imports_in_all_inits(check_only: bool = True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F'Would overwrite {len(failures)} files, run `make style`.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
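    # Typical invocations (hedged: assumes the script lives at utils/custom_init_isort.py):
    #   python utils/custom_init_isort.py               # rewrite offending __init__.py files in place
    #   python utils/custom_init_isort.py --check_only  # only check, raising if a rewrite is needed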
| 536 | 0 |
def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
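
# A quick hand check of the search above: for a perimeter of 12 the only
# Pythagorean triplet is (3, 4, 5), so the product is 60.
assert solution(12) == 60  # 3 + 4 + 5 == 12 and 3**2 + 4**2 == 5**2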
if __name__ == "__main__":
print(f"{solution() = }") | 710 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    '''simple docstring'''
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}") | 26 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mobilenet_v2': [
        'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileNetV2Config',
        'MobileNetV2OnnxConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
        'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileNetV2ForImageClassification',
        'MobileNetV2ForSemanticSegmentation',
        'MobileNetV2Model',
        'MobileNetV2PreTrainedModel',
        'load_tf_weights_in_mobilenet_v2',
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
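# With the lazy module wired up as above, downstream imports behave the same at
# runtime and under TYPE_CHECKING. A hedged sketch (requires torch; the
# top-level re-export is how transformers normally surfaces these names):
from transformers import MobileNetV2Config, MobileNetV2Model

config = MobileNetV2Config()
model = MobileNetV2Model(config)  # randomly initialized; use from_pretrained for real weights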
| 51 | """simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32,
        do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], do_pad: bool = True,
        batch_size=7, min_resolution=30, max_resolution=400, num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
def lowerCamelCase__ ( self : Optional[Any] ):
# Initialize image processor
__lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : int = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : List[Any] ):
# Initialize image processor
__lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : str = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Optional[int] ):
# Initialize image processor
__lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : str = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , ) | 646 | 0 |
"""simple docstring"""
def topological_sort(graph: dict) -> None:
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('''Cycle exists''')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
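
# A quick negative test for the Kahn's-algorithm routine above: adding a
# back-edge 4 -> 0 makes the graph cyclic, so no vertex ever reaches
# in-degree zero and the cycle branch fires.
cyclic_graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [0], 5: []}
topological_sort(cyclic_graph)  # prints "Cycle exists"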
| 272 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Any = pytest.mark.integration
@require_faiss
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowercase: str = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase_ ( self ) -> int:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
_lowercase: List[str] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
_lowercase: Dict = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowercase , _lowercase: Union[str, Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(A_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
_lowercase: Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: List[Any] = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
_lowercase: List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
_lowercase: int = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=A_ )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> Any:
"""simple docstring"""
import faiss
_lowercase: str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowercase: List[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: int = 1
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowercase: Tuple = np.eye(5 , dtype=np.floataa )[::-1]
_lowercase , _lowercase: str = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
_lowercase: Tuple = [scores[0] for scores in total_scores]
_lowercase: Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
import faiss
_lowercase: Union[str, Any] = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowercase: Dict = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
_lowercase: List[str] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Any = faiss.IndexFlat(5 )
_lowercase: List[Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase_ ( self ) -> str:
"""simple docstring"""
import faiss
_lowercase: Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
_lowercase: Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_lowercase: Optional[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: Union[str, Any] = 1
_lowercase , _lowercase: Tuple = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    """simple docstring"""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = '''index.faiss'''
    path = f'''mock://{index_name}'''
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> int:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: int = Elasticsearch()
_lowercase: Tuple = {'''acknowledged''': True}
_lowercase: Tuple = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
_lowercase: Dict = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_lowercase: Optional[int] = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: List[Any] = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_lowercase: Union[str, Any] = ['''foo''', '''bar''', '''foobar''']
_lowercase: str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ )
_lowercase: Any = [scores[0] for scores in total_scores]
_lowercase: List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
_lowercase: List[str] = ['''foo''', '''bar''', '''foobar''']
_lowercase: Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ , request_timeout=30 )
_lowercase: Optional[Any] = [scores[0] for scores in total_scores]
_lowercase: Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
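
# A minimal end-to-end sketch of the FaissIndex API these tests exercise,
# run outside the mocked harness (hedged: assumes faiss is installed).
import faiss

faiss_index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
faiss_index.add_vectors(np.eye(5, dtype=np.float32))  # five one-hot "documents"
demo_query = np.zeros(5, dtype=np.float32)
demo_query[1] = 1
demo_scores, demo_indices = faiss_index.search(demo_query)
print(demo_indices[0])  # -> 1, the row matching the query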
| 272 | 1 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 64 | '''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""], model_result["""ss"""]):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Optional[int] = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = """sgugger/tiny-distilbert-classification"""
__UpperCAmelCase: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , only_pretrain_model=snake_case_ , )
__UpperCAmelCase: Optional[Any] = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , torchscript=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Optional[Any] = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , fpaa=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Union[str, Any] = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Optional[int] = AutoConfig.from_pretrained(snake_case_ )
# set architectures equal to `None`
__UpperCAmelCase: int = None
__UpperCAmelCase: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: int = PyTorchBenchmark(snake_case_ , configs=[config] )
__UpperCAmelCase: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: List[str] = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=snake_case_ , multi_process=snake_case_ , )
__UpperCAmelCase: Tuple = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Optional[Any] = AutoConfig.from_pretrained(snake_case_ )
__UpperCAmelCase: int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Tuple = PyTorchBenchmark(snake_case_ , configs=[config] )
__UpperCAmelCase: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = """sshleifer/tinier_bart"""
__UpperCAmelCase: Optional[Any] = AutoConfig.from_pretrained(snake_case_ )
__UpperCAmelCase: int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: List[str] = PyTorchBenchmark(snake_case_ , configs=[config] )
__UpperCAmelCase: Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = """sshleifer/tiny-gpt2"""
__UpperCAmelCase: Optional[int] = AutoConfig.from_pretrained(snake_case_ )
__UpperCAmelCase: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Union[str, Any] = PyTorchBenchmark(snake_case_ , configs=[config] )
__UpperCAmelCase: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = """sshleifer/tinier_bart"""
__UpperCAmelCase: List[str] = AutoConfig.from_pretrained(snake_case_ )
__UpperCAmelCase: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , )
__UpperCAmelCase: Any = PyTorchBenchmark(snake_case_ , configs=[config] )
__UpperCAmelCase: int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase: str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , save_to_csv=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case_ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(snake_case_ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(snake_case_ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(snake_case_ , """train_time.csv""" ) , env_info_csv_file=os.path.join(snake_case_ , """env.csv""" ) , multi_process=snake_case_ , )
__UpperCAmelCase: str = PyTorchBenchmark(snake_case_ )
benchmark.run()
self.assertTrue(Path(os.path.join(snake_case_ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case_ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case_ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case_ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case_ , """env.csv""" ) ).exists() )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(snake_case_ ):
self.assertTrue(hasattr(snake_case_ , """sequential""" ) )
self.assertTrue(hasattr(snake_case_ , """cumulative""" ) )
self.assertTrue(hasattr(snake_case_ , """current""" ) )
self.assertTrue(hasattr(snake_case_ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase: Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case_ , """log.txt""" ) , log_print=snake_case_ , trace_memory_line_by_line=snake_case_ , multi_process=snake_case_ , )
__UpperCAmelCase: int = PyTorchBenchmark(snake_case_ )
__UpperCAmelCase: Tuple = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(snake_case_ , """log.txt""" ) ).exists() ) | 523 | 0 |
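# A hedged sketch of the benchmark entry point these tests drive; the
# checkpoint and sizes are illustrative, not prescribed by this file.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], inference=True, training=False,
    sequence_lengths=[8], batch_sizes=[1], multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)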
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Scan the id2label mapping for the label that starts with "entail" (e.g. "entailment" in MNLI checkpoints)
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode(self, text, labels):
        # Zero-shot NLI trick: pair the text with one "This example is {label}" hypothesis per candidate label
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F'This example is {label}' for label in labels], return_tensors='pt', padding='max_length')
    def decode(self, outputs):
        logits = outputs.logits
        # Column 2 is the entailment logit for the default facebook/bart-large-mnli head
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Any:
"""simple docstring"""
a = old_name
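    # The stem ("patch_embed") is conv -> batchnorm -> conv -> batchnorm; indices 0, 1, 3, 4 map onto the HF submodule names below.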
if "patch_embed" in old_name:
a , a , a = old_name.split('''.''' )
if layer == "0":
a = old_name.replace('''0''', '''convolution1''' )
elif layer == "1":
a = old_name.replace('''1''', '''batchnorm_before''' )
elif layer == "3":
a = old_name.replace('''3''', '''convolution2''' )
else:
a = old_name.replace('''4''', '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''', snake_case_ ):
a = r'''\b\d{2}\b'''
if bool(re.search(snake_case_, snake_case_ ) ):
a = re.search(r'''\d\.\d\d.''', snake_case_ ).group()
else:
a = re.search(r'''\d\.\d.''', snake_case_ ).group()
if int(match[0] ) < 6:
a = old_name.replace(snake_case_, '''''' )
a = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
a = '''intermediate_stages.''' + trimmed_name
else:
a = old_name.replace(snake_case_, '''''' )
if int(match[2] ) < num_meta4D_last_stage:
a = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] )
else:
a = str(int(match[2] ) - num_meta4D_last_stage )
a = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
a = trimmed_name.replace('''norm1''', '''layernorm1''' )
elif "norm2" in old_name:
a = trimmed_name.replace('''norm2''', '''layernorm2''' )
elif "fc1" in old_name:
a = trimmed_name.replace('''fc1''', '''linear_in''' )
elif "fc2" in old_name:
a = trimmed_name.replace('''fc2''', '''linear_out''' )
a = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''', snake_case_ ):
a = old_name.replace('''network''', '''intermediate_stages''' )
if "fc" in new_name:
a = new_name.replace('''fc''', '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a = new_name.replace('''norm1''', '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a = new_name.replace('''norm2''', '''batchnorm_after''' )
if "proj" in new_name:
a = new_name.replace('''proj''', '''projection''' )
if "dist_head" in new_name:
a = new_name.replace('''dist_head''', '''distillation_classifier''' )
elif "head" in new_name:
a = new_name.replace('''head''', '''classifier''' )
elif "patch_embed" in new_name:
a = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a = new_name.replace('''norm''', '''layernorm''' )
a = '''efficientformer.''' + new_name
else:
a = '''efficientformer.encoder.''' + new_name
return new_name
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]:
"""simple docstring"""
for key in checkpoint.copy().keys():
a = checkpoint.pop(snake_case_ )
a = val
return checkpoint
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(snake_case_, stream=snake_case_ ).raw )
return image
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Dict:
"""simple docstring"""
a = torch.load(snake_case_, map_location='''cpu''' )['''model''']
a = EfficientFormerConfig.from_json_file(snake_case_ )
a = EfficientFormerForImageClassificationWithTeacher(snake_case_ )
a = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
a = config.depths[-1] - config.num_metaad_blocks + 1
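    # Index threshold inside the last stage: blocks below it stay Meta4D (conv) blocks, the rest are remapped to Meta3D (attention) blocks.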
a = convert_torch_checkpoint(snake_case_, snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
a = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
a = prepare_img()
a = 2_5_6
a = 2_2_4
a = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
a = processor(images=snake_case_, return_tensors='''pt''' ).pixel_values
# original processing pipeline
a = Compose(
[
Resize(snake_case_, interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(snake_case_ ),
ToTensor(),
Normalize(snake_case_, snake_case_ ),
] )
a = image_transforms(snake_case_ ).unsqueeze(0 )
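    # Sanity check: the HF image processor must reproduce the original torchvision pipeline numerically.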
assert torch.allclose(snake_case_, snake_case_ )
a = model(snake_case_ )
a = outputs.logits
a = (1, 1_0_0_0)
if "l1" in model_name:
a = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0], snake_case_, atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
a = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0], snake_case_, atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
a = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(snake_case_ )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=snake_case_, )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=snake_case_, )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase__ : str = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 387 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
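# Seed and configure torch/cuDNN for deterministic kernels so pipeline outputs are reproducible across runs.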
class lowerCamelCase_ ( a_ , a_ , a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE_ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
a = 32
a = embedder_hidden_size
# prior components
torch.manual_seed(0 )
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__lowerCamelCase ,projection_dim=__lowerCamelCase ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
a = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=__lowerCamelCase ,num_layers=1 ,)
torch.manual_seed(0 )
a = DDPMScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''sample''' ,num_train_timesteps=10_00 ,clip_sample=__lowerCamelCase ,clip_sample_range=5.0 ,beta_schedule='''squaredcos_cap_v2''' ,)
# regular denoising components
torch.manual_seed(0 )
a = StableUnCLIPImageNormalizer(embedding_dim=__lowerCamelCase )
a = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__lowerCamelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
a = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type='''projection''' ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=__lowerCamelCase ,layers_per_block=1 ,upcast_attention=__lowerCamelCase ,use_linear_projection=__lowerCamelCase ,)
torch.manual_seed(0 )
a = DDIMScheduler(
beta_schedule='''scaled_linear''' ,beta_start=0.00_085 ,beta_end=0.012 ,prediction_type='''v_prediction''' ,set_alpha_to_one=__lowerCamelCase ,steps_offset=1 ,)
torch.manual_seed(0 )
a = AutoencoderKL()
a = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Dict ,__lowerCamelCase : int=0 ):
'''simple docstring'''
if str(__lowerCamelCase ).startswith('''mps''' ):
a = torch.manual_seed(__lowerCamelCase )
else:
a = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
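        # Only enforce the exact max-difference check on CPU; GPU kernels are nondeterministic enough to make it flaky.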
a = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__lowerCamelCase )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
a = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' ,torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = torch.Generator(device='''cpu''' ).manual_seed(0 )
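        # The misspelled prompt "anime turle" below matches the reference image loaded above, which was presumably generated with this exact string.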
a = pipe('''anime turle''' ,generator=__lowerCamelCase ,output_type='''np''' )
a = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowerCamelCase ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' ,torch_dtype=torch.floataa )
a = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = pipe(
'''anime turtle''' ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type='''np''' ,)
a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 387 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16,
                 d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000,
                 same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True,
                 dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01,
                 init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        """Initialize a Transformer-XL configuration with the given hyperparameters."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first when requested
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 707 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
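# Map pip-style comparison specifiers to their Python operator equivalents.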
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''')
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = F'''\n{hint}''' if hint is not None else ''
    # non-versioned check
    if re.match(R'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
                F''' got {requirement}''')
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
                    F''' but got {requirement}''')
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys())}, but got {op}''')
    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''')
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement, hint)
| 449 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = IFPipeline
UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self : int ):
return self._get_dummy_components()
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self : Optional[int] ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self : int ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self : int ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
# if
_A = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_A = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_A , _A = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A = None
_A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A = IFImgaImgPipeline(**pipe_a.components )
_A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A = IFInpaintingPipeline(**pipe_a.components )
_A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
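        # Peak CUDA memory stats were just reset, so the max_memory_allocated checks below cover only this stage.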
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 7 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1, flipping both into state |1>
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
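# Demo below: with both qubits flipped to |1>, all 1000 shots should land in state "11".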
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total counts for the various states are: {counts}')
| 204 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
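# Deferred import structure: _LazyModule only imports these submodules on first attribute access.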
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 127 | 0 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
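# Pure-Python SHA-1 (FIPS 180-4 style): pad the message, split into 512-bit blocks, expand each block, run 80 rounds.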
class SHA1Hash:
    """Class that contains the full SHA-1 pipeline for hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        # Initial hash state: five 32-bit words defined by the SHA-1 specification
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        # Append the 0x80 marker byte, zero-pad to 56 bytes (mod 64), then append
        # the original bit length as a big-endian 64-bit integer
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded message into 64-byte (512-bit) blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand one 16-word block into the 80-word message schedule
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # The round function f and constant k change every 20 rounds
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    """Check this SHA-1 implementation against hashlib's reference implementation."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """Hash a string or the contents of a file, chosen via command-line options."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase( self ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCamelCase , )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase( self ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCamelCase , )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCamelCase )
def snake_case_ ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def snake_case_ ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
@require_beam
def UpperCamelCase( self ):
_snake_case = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=lowerCamelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCamelCase , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCamelCase( self ):
import apache_beam as beam
_snake_case = beam.io.parquetio.WriteToParquet
_snake_case = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=lowerCamelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
_snake_case = partial(lowerCamelCase , num_shards=2 )
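                # Force the writer to emit two shards so the sharded Arrow filenames can be verified below.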
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCamelCase , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                            lowerCamelCase , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCamelCase( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=lowerCamelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def UpperCamelCase( self ):
_snake_case = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = NestedBeamDataset(cache_dir=lowerCamelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCamelCase , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 672 | 1 |
from math import pi
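# Arc length of a circular sector: s = 2*pi*r * (angle/360), with the angle given in degrees.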
def arc_length(angle: int, radius: int) -> float:
    """Return the arc length for the given central angle (degrees) and radius."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 701 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase__ : List[str] = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
UpperCAmelCase__ : List[Any] = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
UpperCAmelCase__ : Optional[Any] = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
UpperCAmelCase__ : int = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
UpperCAmelCase__ : Any = orig_key.split(""".""" )[0].split("""_""" )[-1]
UpperCAmelCase__ : Dict = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
UpperCAmelCase__ : Tuple = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
UpperCAmelCase__ : Dict = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
UpperCAmelCase__ : str = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
UpperCAmelCase__ : List[Any] = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
UpperCAmelCase__ : str = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
UpperCAmelCase__ : Optional[int] = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
UpperCAmelCase__ : Dict = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
UpperCAmelCase__ : Any = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
UpperCAmelCase__ : Any = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
UpperCAmelCase__ : Optional[Any] = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
UpperCAmelCase__ : Dict = """yoso.""" + orig_key
return orig_key
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ : Optional[Any] = orig_state_dict.pop(__UpperCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase__ : Dict = val
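    # Re-tie the MLM decoder bias and rebuild position ids; the +2 offset presumably skips the reserved padding positions, as in RoBERTa-style embeddings.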
UpperCAmelCase__ : Dict = orig_state_dict["""cls.predictions.decoder.bias"""]
UpperCAmelCase__ : Optional[Any] = torch.arange(__UpperCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model_state_dict"""]
UpperCAmelCase__ : List[str] = YosoConfig.from_json_file(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = YosoForMaskedLM(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = convert_checkpoint_helper(config.max_position_embeddings , __UpperCamelCase )
print(model.load_state_dict(__UpperCamelCase ) )
model.eval()
model.save_pretrained(__UpperCamelCase )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 194 | 0 |
from collections.abc import Callable
import numpy as np
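# Explicit (forward) Euler method for y' = f(x, y): y[k+1] = y[k] + h * f(x[k], y[k]); global error is O(h).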
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # One Euler step: advance y along the slope given by the ODE at (x, y[k])
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__lowerCamelCase : Optional[Any] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__lowerCamelCase : List[str] = """▁"""
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # An ALBERT sequence has the format [CLS] X [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Token type ids are 0 for the first segment (including [CLS] and its [SEP]) and 1 for the second segment
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 297 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : int ):
'''simple docstring'''
super().__init__()
A__ : int = nn.Linear(3 , 4 )
A__ : str = nn.BatchNormad(4 )
A__ : Tuple = nn.Linear(4 , 5 )
def _UpperCamelCase ( self : Dict , snake_case : Optional[Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__UpperCamelCase ) ) )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def _UpperCamelCase ( self : Dict , snake_case : Union[str, Any] , *snake_case : Optional[Any] , **snake_case : str ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def _UpperCamelCase ( self : Dict , snake_case : int , snake_case : int ):
'''simple docstring'''
return output + 1
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = ModelForTest()
A__ : Tuple = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(test_model._hf_hook , __UpperCamelCase )
self.assertTrue(hasattr(__UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(__UpperCamelCase , """_old_forward""" ) )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = ModelForTest()
A__ : Dict = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase , append=__UpperCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(__UpperCamelCase , """_old_forward""" ) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = ModelForTest()
A__ : List[str] = torch.randn(2 , 3 )
A__ : List[str] = test_model(x + 1 )
A__ : List[Any] = test_model(x + 2 )
A__ : Optional[int] = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : Optional[int] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain
A__ : Union[str, Any] = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : Union[str, Any] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ : Dict = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : int = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = ModelForTest()
A__ : Any = torch.randn(2 , 3 )
A__ : Tuple = test_model(__UpperCamelCase )
A__ : Optional[Any] = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : List[str] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain
A__ : Optional[int] = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : Union[str, Any] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A__ : Union[str, Any] = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , output + 2 , atol=1e-5 )
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
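# Minimal standalone sketch of the hook mechanics exercised above: an
# AlignDevicesHook with offload=True keeps a module's weights on the meta
# device until forward time. This is illustrative usage, not part of the test
# suite; nn.Linear(3, 4) is an arbitrary example module.
if __name__ == "__main__":
    import torch.nn as nn
    from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module
    module = nn.Linear(3, 4)
    add_hook_to_module(module, AlignDevicesHook(execution_device="cpu", offload=True))
    print(module.weight.device)  # meta -- weights are materialized only during forward
    remove_hook_from_module(module)  # restores the original weights on CPU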
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
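# A quick worked example (illustrative): 25 = 0b11001 and 32 = 0b100000 share
# no set bits, so their AND padded to the longer width is all zeros.
if __name__ == "__main__":
    print(binary_and(25, 32))  # -> 0b000000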
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    # Incremental sieve of Eratosthenes: lazily yields the primes in order.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
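# Sanity check (illustrative): the incremental sieve yields the primes in order.
if __name__ == "__main__":
    from itertools import islice
    print(list(islice(sieve(), 8)))  # -> [2, 3, 5, 7, 11, 13, 17, 19]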
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    """Print the whole diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
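# For a non-interactive run (illustrative): pretty_print(3) prints the three
# upper rows via floyd(3) followed by the mirrored lower half via reverse_floyd(3).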
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'),
            os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'),
        )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', re.sub('DDPM', 'Test', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", f"""{long_class_name}SchedulerOutput""", re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', REFERENCE_CODE, overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE), )
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['bert-base-uncased', 'bert-base-cased']
TINY_MODEL_CHECKPOINT = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='tf', padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1E-5)
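# In-graph tokenization sketch (illustrative; requires tensorflow-text):
# tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
# batch = tf_tokenizer(tf.constant(["hello world"]))  # dict of dense int tensors
# The resulting tensors can be consumed directly inside a tf.function graph,
# which is exactly what the graph-mode and saved-model tests above verify.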
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowercase = None
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
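# Typical usage sketch (illustrative; downloads the pretrained tokenizer files):
# tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# encoding = tokenizer("Hello world")  # input_ids include the [CLS] ... [SEP] wrapping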
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1E-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
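# Example (illustrative): a scaled-down config with linear RoPE scaling, which
# exercises the validation above (the factor must be a float greater than 1).
# config = GPTNeoXConfig(hidden_size=256, num_attention_heads=8,
#                        rope_scaling={"type": "linear", "factor": 2.0})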
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
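# Known small cases (Project Euler 114): solution(3) == 2 and solution(7) == 17.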
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator=None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output=None, output_type: str = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
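# Example: bubble_sort([0, 5, 2, 3, 2]) returns [0, 2, 2, 3, 5]; an
# already-sorted input returns after a single pass because no swap occurs.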
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
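# Example: sigmoid(np.array([-1.0, 0.0, 1.0])) -> approximately [0.2689, 0.5, 0.7311]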
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F'''{k}</w>''']
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'''using checkpoint {checkpoint_file}''')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = vars(chkpt['args']['model'])
    src_lang = args['source_lang']
    tgt_lang = args['target_lang']
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'''dict.{src_lang}.txt''')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'''dict.{tgt_lang}.txt''')
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'''Generating {merges_file}''')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
    model_conf = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0
    print(f'''Generating {fsmt_model_config_file}''')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }
    print(f'''Generating {fsmt_tokenizer_config_file}''')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'''cd {data_root}''')
    print(f'''transformers-cli upload {model_dir}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
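# Example invocation (illustrative; the paths are hypothetical, and the checkpoint
# folder must also contain the fairseq dict.*.txt files and the bpecodes file):
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path ./converted/wmt19-ru-en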
| 137 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
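# Quick sanity check (illustrative, not part of the original module):
#   >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   >>> maxpooling(arr, size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])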
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
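# Quick sanity check (illustrative; note the int() truncation of each window mean):
#   >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   >>> avgpooling(arr, size=2, stride=2)
#   array([[ 3.,  5.],
#          [11., 13.]])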
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
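# The sample function f(x) = x**3 - 2*x - 5 has its real root near x = 2.0945515,
# so the bisection(f, 1, 1000) call above converges to that value within the
# 1e-7 stopping tolerance.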
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = 'A photo of an astronaut'
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained('BAAI/AltDiffusion', subfolder='scheduler')
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type='numpy')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
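# End-user usage sketch of the pipeline under test (illustrative; downloads
# several GB of weights on first run):
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
# image = pipe("A painting of a squirrel eating a burger").images[0]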
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
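# For instance (illustrative): get_audio_spectrogram_transformer_config(
# "ast-finetuned-audioset-10-10-0.4593") yields an ASTConfig with 527 AudioSet
# labels, while a "speech-commands" model name switches to 35 labels and max_length=128.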
def rename_key(name):
    if "module.v" in name:
        name = name.replace('module.v', 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token', 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0', 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1', 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
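
    # --- illustrative aside (hypothetical, not part of the original script) ---
    # convert_state_dict() splits each fused "qkv" projection into separate
    # query/key/value tensors. A standalone demonstration of that slicing;
    # dim is a made-up hidden size (real AST checkpoints use 768).
    dim = 4
    qkv_weight = torch.randn(3 * dim, dim)  # rows stacked as [query; key; value]
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)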
| 438 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
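

# --- illustrative sketch (not part of the module above) ---
# A minimal stand-in for the lazy-import pattern this file relies on: module
# attributes resolve on first access instead of at import time. This is a
# simplified sketch, not the actual transformers._LazyModule implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)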
| 507 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
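

# --- illustrative usage (not part of the module above) ---
# A small sketch of driving one of the filters, assuming the IIRFilter class
# from audio_filters.iir_filter exposes a per-sample process() method, as it
# does in TheAlgorithms repository.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    # run a short made-up signal through the filter one sample at a time
    samples = [0.0, 1.0, 0.5, -0.25, 0.0]
    filtered = [lowpass.process(sample) for sample in samples]
    print(filtered)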
| 507 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
SCREAMING_SNAKE_CASE = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase (unittest.TestCase ):
_snake_case = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_snake_case = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_snake_case = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_snake_case = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 705 |
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
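

# --- illustrative usage (hypothetical, not part of the module above) ---
# Each filesystem exposes one compressed file as a one-entry archive. A sketch
# assuming a local file "data.txt.gz" can be created next to the script:
if __name__ == "__main__":
    import gzip

    with gzip.open("data.txt.gz", "wt") as f:  # made-up file name
        f.write("hello\n")

    fs = GzipFileSystem(fo="data.txt.gz")
    print(fs.cat("data.txt"))  # b"hello\n" -- the decompressed content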
| 283 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
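

# --- illustrative aside (not part of the tests above) ---
# The asserts check that GLPN snaps spatial dimensions to a multiple of
# size_divisor. A minimal sketch of that rounding, assuming GLPN rounds each
# dimension down to the nearest multiple (as its resize step does):
def round_down_to_multiple(value: int, divisor: int) -> int:
    return (value // divisor) * divisor


assert round_down_to_multiple(500, 32) == 480
assert round_down_to_multiple(480, 32) == 480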
| 379 |
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 379 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it in with the characters
    of the input string, and then reads it in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
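
    # --- illustrative round trip (hypothetical example, not in the original) ---
    # encrypt() and decrypt() are inverses for any key between 2 and the
    # message length:
    message = "WE ARE DISCOVERED FLEE AT ONCE"
    cipher = encrypt(message, 3)
    assert decrypt(cipher, 3) == message
    print(cipher)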
| 667 |
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 667 | 1 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
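

# --- illustrative usage (hypothetical, not part of the module above) ---
# A sketch of running the processor on a synthetic image; inside transformers
# the class would normally be loaded with BlipImageProcessor.from_pretrained.
if __name__ == "__main__":
    from PIL import Image

    processor = BlipImageProcessor()
    image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
    pixel_values = processor(images=image, return_tensors="np").pixel_values
    print(pixel_values.shape)  # (1, 3, 384, 384): resized, rescaled, normalized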
| 253 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
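

# --- illustrative aside (not part of the module above) ---
# tpu_command_launcher folds every pod command into a single shell string
# before handing it to `gcloud compute tpus tpu-vm ssh --command`:
_demo_commands = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
assert "; ".join(_demo_commands) == "cd /usr/share; pip install accelerate -U; python train.py"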
| 591 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["DeiTFeatureExtractor"]
UpperCamelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    """Find the area of the grid whose rectangle count is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
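
    # --- illustrative check (not part of the original solution) ---
    # The solution exploits that an a x b grid contains T(a) * T(b) rectangles,
    # where T(n) = n(n+1)/2 is the nth triangle number; brute-force check for
    # a 3 x 2 grid:
    def count_rectangles(a: int, b: int) -> int:
        return sum(
            1
            for x1 in range(a)
            for x2 in range(x1 + 1, a + 1)
            for y1 in range(b)
            for y2 in range(y1 + 1, b + 1)
        )

    assert count_rectangles(3, 2) == 6 * 3 == 18  # T(3) = 6, T(2) = 3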
| 376 | 0 |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    # prevent gradients from being computed for this module's parameters
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
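

# --- illustrative usage (not part of the module above) ---
if __name__ == "__main__":
    import torch.nn as nn

    layer = nn.Linear(4, 2)
    freeze_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())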
| 59 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 340 | 0 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
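# Usage sketch (added; argument values are illustrative only):
# config = InformerConfig(prediction_length=24, num_time_features=2)
# config.context_length   # == 24, defaults to prediction_length
# config.feature_size     # input_size * len(lags_sequence) + _number_of_features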
| 721 |
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
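# Added note on the dispatch pattern: each *Command class registers its own
# sub-parser and calls set_defaults(func=<factory>), so `args.func(args)` above
# builds the selected command object and `service.run()` executes it.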
| 685 | 0 |
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
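# Run note (added; the path is an assumption about the repository layout):
#   python -m pytest tests/models/camembert/test_tokenization_camembert.py
# The @slow integration case only runs when RUN_SLOW=1 is set in the environment.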
| 194 |
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
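# Usage sketch (added): compose the two sub-configs into a full ALIGN config.
# text_config = AlignTextConfig(vocab_size=30522)
# vision_config = AlignVisionConfig(image_size=600)
# config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)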
| 194 | 1 |
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
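# Added note (hedged): @nightly gates these cases behind diffusers' nightly flag
# (RUN_NIGHTLY=1 in its testing utilities), since each test downloads the ONNX
# checkpoint and runs real GPU inference through onnxruntime.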
| 721 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
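# Added math note - the rolling-hash identity used in the search loop:
#   H(text[i+1 : i+1+m]) = ((H(text[i : i+m]) - ord(text[i]) * B**(m-1)) * B
#                           + ord(text[i+m])) mod M
# with base B = alphabet_size and modulus M; `modulus_power` caches B**(m-1) mod M
# so each window update costs O(1).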
| 276 | 0 |
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : List[Any] ) -> str:
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCamelCase__ )
while cur > 1:
# Find the maximum number in arr
_SCREAMING_SNAKE_CASE : Union[str, Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_SCREAMING_SNAKE_CASE : Dict = arr[mi::-1] + arr[mi + 1 : len(lowerCamelCase__ )]
# Reverse whole list
_SCREAMING_SNAKE_CASE : Tuple = arr[cur - 1 :: -1] + arr[cur : len(lowerCamelCase__ )]
cur -= 1
return arr
if __name__ == "__main__":
lowercase_ : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
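# Worked example (added): pancake_sort([3, 1, 2])
#   cur=3: max 3 is already first, flipping the first 3 elements gives [2, 1, 3]
#   cur=2: max of [2, 1] is first, flipping the first 2 elements gives [1, 2, 3]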
| 295 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
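# Added note: this subclass is a pure renaming shim - it inherits all behaviour
# from LayoutLMv2ImageProcessor and only adds the FutureWarning, so old imports
# keep working until the v5 removal.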
| 295 | 1 |
"""simple docstring"""
def _A( lowerCAmelCase , lowerCAmelCase ):
A__ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
A__ : Any = 0
while b > 0:
if b & 1:
A__ : Union[str, Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
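# Added note: both loops are the binary ("Russian peasant") decomposition of b -
# for every set bit k of b they add a * 2**k, so only O(log b) additions are
# needed; e.g. binary_multiply(13, 6): 6 = 0b110, so res = 26 + 52 = 78.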
| 363 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
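# Added note: tests that call AutoConfig.register / AutoImageProcessor.register
# must undo the registration in a `finally` block (as above); otherwise the fake
# "custom" model type would leak into every test that runs later in the session.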
| 363 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
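# Export sketch (added; illustrative, not part of the original module):
# config = CodeGenConfig()
# onnx_config = CodeGenOnnxConfig(config, use_past=True)
# list(onnx_config.inputs)  # input_ids, past_key_values.*.{key,value}, attention_mask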
| 387 |
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Read raw characters from stdin, handling Windows and POSIX terminals."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Return one keypress: printable chars pass through, arrows get ARROW_KEY_FLAG."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
"""simple docstring"""
from maths.prime_factors import prime_factors
def lowerCamelCase_ ( _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCamelCase )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(_lowerCamelCase ) ) % 2 else 1
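# Added note: this is the Liouville function lambda(n) = (-1)**Omega(n), where
# Omega(n) counts prime factors with multiplicity; e.g. 12 = 2 * 2 * 3 has three
# factors, so liouville_lambda(12) == -1.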
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 142 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[str] = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Union[str, Any] = "mctct"
def __init__( self , UpperCamelCase__=8_065 , UpperCamelCase__=1_536 , UpperCamelCase__=36 , UpperCamelCase__=6_144 , UpperCamelCase__=4 , UpperCamelCase__=384 , UpperCamelCase__=920 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.3 , UpperCamelCase__="relu" , UpperCamelCase__=0.02 , UpperCamelCase__=0.3 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=(7,) , UpperCamelCase__=(3,) , UpperCamelCase__=80 , UpperCamelCase__=1 , UpperCamelCase__=None , UpperCamelCase__="sum" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = layerdrop
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = conv_glu_dim
lowerCamelCase_ = conv_dropout
lowerCamelCase_ = num_conv_layers
lowerCamelCase_ = input_feat_per_channel
lowerCamelCase_ = input_channels
lowerCamelCase_ = conv_channels
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowerCamelCase_ = list(UpperCamelCase__ )
lowerCamelCase_ = list(UpperCamelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 142 | 1 |
from __future__ import annotations


def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
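# Added example: mean([3, 6, 9, 12, 15, 18, 21]) == 12.0, while mean([]) raises
# ValueError("List is empty").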
| 714 |
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
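# Usage sketch (added; checkpoint name assumed to be the public OWL-ViT release):
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                    images=image, return_tensors="pt")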
| 362 | 0 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
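# Added note: the v1.1 user_timeline endpoint returns at most 200 tweets per call
# and roughly the 3200 most recent overall, hence the backwards pagination with
# max_id until a request comes back empty. Valid API credentials must be filled
# in above before running.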
| 109 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase (*SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Union[Dict, Any]] = None , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 ) -> Optional[int]:
from .. import __version__
SCREAMING_SNAKE_CASE = take_from
SCREAMING_SNAKE_CASE = ()
if not isinstance(args[0] , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
SCREAMING_SNAKE_CASE = None
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE_ ),)
SCREAMING_SNAKE_CASE = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
values += (getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),)
SCREAMING_SNAKE_CASE = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
SCREAMING_SNAKE_CASE = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , SCREAMING_SNAKE_CASE_ , stacklevel=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0:
SCREAMING_SNAKE_CASE = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE = call_frame.filename
SCREAMING_SNAKE_CASE = call_frame.lineno
SCREAMING_SNAKE_CASE = call_frame.function
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return
elif len(SCREAMING_SNAKE_CASE_ ) == 1:
return values[0]
return values
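# Usage sketch (added, following the call convention above):
# scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
# This pops `scale` out of kwargs (if present), warns the caller with a
# FutureWarning, and returns the popped value.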
| 247 | 0 |
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
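# CLI usage (added; assumes the standard transformers-cli entry point):
#   transformers-cli download bert-base-uncased --cache-dir ~/models --force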
| 713 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_set_level( self ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        msg = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + '\n' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , '' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + '\n' )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
    def test_env_override( self ):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def test_env_invalid_override( self ):
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger ) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        msg = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , '' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + '\n' )
self.assertEqual(cl.out , msg + '\n' )
def test_set_progress_bar_enabled( ):
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
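# Note: `CaptureLogger` (from transformers.testing_utils) attaches a stream handler
# and exposes everything the wrapped logger emitted as `cl.out`, e.g.:
#
#   with CaptureLogger(logging.get_logger("transformers")) as cl:
#       logging.get_logger("transformers").warning("boom")
#   assert "boom" in cl.out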
| 389 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.' , FutureWarning , )
        super().__init__(args=args , **kwargs )
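# Usage sketch: a drop-in Trainer that only adds the deprecation warning, e.g.
#   trainer = SageMakerTrainer(args=training_args, model=model, train_dataset=ds)
# behaves exactly like Trainer(...) once the FutureWarning has been emitted.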
| 328 |
import os
def solution( filename: str = "matrix.txt" ) -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
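# e.g. for the 2x2 grid [[1, 3], [2, 4]] the dp table becomes [[1, 4], [3, 7]],
# so the minimal right/down path sum is 7 (path 1 -> 2 -> 4).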
if __name__ == "__main__":
print(F'''{solution() = }''') | 201 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
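# Worked example (illustrative values, not from the doctests above): with
# pred_label=[[0, 1], [1, 2]], label=[[0, 1], [2, 2]], num_labels=3 and
# ignore_index=255, area_intersect is [1, 1, 1] and area_union is [1, 2, 2],
# so the per-category IoU computed downstream is [1.0, 0.5, 0.5].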
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1_024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ):
        return len(self.encoder )
    @property
    def tgt_lang( self ):
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ):
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ):
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path : str ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
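# Usage sketch (hypothetical local files; the tokenizer normally ships with
# facebook/speech_to_text checkpoints):
#
#   tok = Speech2TextTokenizer("vocab.json", "sentencepiece.bpe.model",
#                              lang_codes="mustc", tgt_lang="fr")
#   tok("c'est une phrase").input_ids  # -> [<lang:fr> id, subword ids..., eos id]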
| 679 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig( PretrainedConfig ):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size : int = 250_002 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3_072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 514 , initializer_range : float = 0.02 , pad_token_id : int = 1 , layer_norm_eps : float = 1E-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
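# Usage sketch: overriding one field while keeping the ERNIE-M defaults.
#
#   config = ErnieMConfig(num_hidden_layers=6)
#   assert config.model_type == "ernie_m" and config.hidden_size == 768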
| 486 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__ = logging.get_logger(__name__)
def squared_euclidean_distance( a, b ):
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a ), axis=1 )
    b2 = np.sum(np.square(b ), axis=0 )
    ab = np.matmul(a, b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize( x, clusters ):
    '''simple docstring'''
    x = x.reshape(-1, 3 )
    d = squared_euclidean_distance(x, clusters )
    return np.argmin(d, axis=1 )
class ImageGPTImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__(self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def normalize(self , image , data_format = None , ) -> np.ndarray:
        '''simple docstring'''
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess(self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
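# Usage sketch (hypothetical clusters; real ImageGPT checkpoints ship 512 RGB
# cluster centers scaled to [-1, 1]):
#
#   processor = ImageGPTImageProcessor(clusters=np.random.uniform(-1, 1, (512, 3)))
#   batch = processor(images=PIL.Image.new("RGB", (64, 64)), return_tensors="np")
#   batch["input_ids"].shape  # -> (1, 256 * 256) after resize + color quantization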
| 486 | 1 |
'''simple docstring'''
def equation( x : float ) -> float:
    return 10 - x * x
def bisection( a : float , b : float ) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("""Wrong space!""" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
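# Both calls print ~3.1622..., the positive root of 10 - x**2 (i.e. sqrt(10)),
# since each bracketing interval contains only that root.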
| 9 |
'''simple docstring'''
def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
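# The qualifying triplet is (a, b, c) = (200, 375, 425); the printed product is 31875000.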
| 9 | 1 |
'''simple docstring'''
def temp_input_value( min_val : int = 10 , max_val : int = 1_000 , option : bool = True ) -> int:
    """simple docstring"""
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
    return min_val if option else max_val
def get_avg( number_1 : int , number_2 : int ) -> int:
    """simple docstring"""
    return int((number_1 + number_2) / 2 )
def guess_the_number( lower : int , higher : int , to_guess : int ) -> None:
    """simple docstring"""
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f'guess the number : {last_numbers[-1]}' )
    print(f'details : {last_numbers!s}' )
def main() -> None:
    """simple docstring"""
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
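# e.g. guess_the_number(1, 100, 42) halves the bracket each round:
# guesses 50 -> 25 -> 37 -> 43 -> 40 -> 41 -> 42, i.e. O(log n) steps.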
| 211 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
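# Note: with this lazy-module pattern, `import transformers.models.pegasus_x` stays
# cheap; the torch-backed symbols listed in _import_structure are only imported on
# first attribute access, e.g. the first use of `PegasusXModel`.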
| 55 | 0 |
def fizz_buzz( number : int , iterations : int ) -> str:
    '''simple docstring'''
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
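# e.g. fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " (trailing space kept).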
if __name__ == "__main__":
import doctest
doctest.testmod() | 94 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 94 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""add_prefix_space""": True}
    test_seq2seq = False
    def setUp(self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider",
            "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
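    # With the toy vocab above, "lower newer" (prefix space added) BPE-splits into
    # ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] -> ids [14, 15, 10, 9, 3, 2, 15];
    # appending <unk> (id 19) yields the expected list asserted above.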
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = """lower newer"""
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs(self , *args , **kwargs ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
    def test_padding(self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
    def test_padding_if_pad_token_set_slow(self ):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
        # Simple input
        s = """This is a simple input"""
        s2 = ["""This is a simple input looooooooong""", """This is a simple input"""]
        p = ("""This is a simple input""", """This is a pair""")
        p2 = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        out_s2 = tokenizer(s2 , padding=True , truncate=True , return_tensors="""np""" )
        out_p = tokenizer(*p , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        out_p2 = tokenizer(p2 , padding=True , truncate=True , return_tensors="""np""" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["""input_ids"""] )
        self.assertTrue(0 in out_s["""attention_mask"""] )
        # s2
        # test automatic padding
        self.assertEqual(out_s2["""input_ids"""].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["""input_ids"""][0] )
        self.assertFalse(0 in out_s2["""attention_mask"""][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["""input_ids"""][1] )
        self.assertTrue(0 in out_s2["""attention_mask"""][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["""input_ids"""] )
        self.assertTrue(0 in out_p["""attention_mask"""] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["""input_ids"""].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["""input_ids"""][0] )
        self.assertFalse(0 in out_p2["""attention_mask"""][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["""input_ids"""][1] )
        self.assertTrue(0 in out_p2["""attention_mask"""][1] )
    def test_add_bos_token_slow(self ):
        bos_token = """$$$"""
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = """This is a simple input"""
        s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_s2 = tokenizer(s2 )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2 ) )
    def test_padding_different_model_input_name(self ):
pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self ):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False , add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                sequence_0 = """Encode this."""
                sequence_1 = """This one too please."""
                encoded_sequence = tokenizer.encode(sequence_0 , add_special_tokens=False )
                encoded_sequence += tokenizer.encode(sequence_1 , add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0 , sequence_1 , add_special_tokens=True , return_special_tokens_mask=True , )
                encoded_sequence_w_special = encoded_sequence_dict["""input_ids"""]
                special_tokens_mask = encoded_sequence_dict["""special_tokens_mask"""]
                self.assertEqual(len(special_tokens_mask ) , len(encoded_sequence_w_special ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence , filtered_sequence )
@require_tokenizers
class OPTTokenizationTest( unittest.TestCase ):
'''simple docstring'''
    def test_serialize_deserialize_fast_opt(self ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=True )
        text = """A photo of a cat"""
        input_ids = tokenizer.encode(text )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""test_opt""" )
        tokenizer = AutoTokenizer.from_pretrained("""./test_opt""" )
        input_ids = tokenizer.encode(text )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence(self ):
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=True )
        text = """A photo of a cat"""
        input_ids = tokenizer.encode(text )
        # Same as above
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
    def test_users_can_modify_bos(self ):
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=True )
        tokenizer.bos_token = """bos"""
        tokenizer.bos_token_id = tokenizer.get_vocab()["""bos"""]
        text = """A photo of a cat"""
        input_ids = tokenizer.encode(text )
        # We changed the bos token
        self.assertEqual(input_ids , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""./tok""" )
        tokenizer = AutoTokenizer.from_pretrained("""./tok""" )
        self.assertTrue(tokenizer.is_fast )
        input_ids = tokenizer.encode(text )
        self.assertEqual(input_ids , [31957, 250, 1345, 9, 10, 4758] ) | 585 | '''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    '''simple docstring'''
    qts = tuple((re.compile(x + """$""" ) for x in qs) )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    '''simple docstring'''
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    '''simple docstring'''
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""" , None )),
        (("transformer", "wte", "embedding"), P("""mp""" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , """mp""" )),
        (("attention", "out_proj", "kernel"), P("""mp""" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , """mp""" )),
        (("mlp", "c_fc", "bias"), P("""mp""" )),
        (("mlp", "c_proj", "kernel"), P("""mp""" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
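# Usage sketch (hypothetical parameter tree mirroring the flax GPT-Neo layout):
#
#   params = {"transformer": {"wte": {"embedding": None}}}
#   specs = set_partitions(params)
#   # specs["transformer"]["wte"]["embedding"] == P("mp", None)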
| 427 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
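# Usage sketch (hedged; mirrors how ModelTesterMixin drives this helper):
#
#   config, inputs_dict = EsmModelTester(self).prepare_config_and_inputs_for_common()
#   outputs = EsmModel(config)(**inputs_dict)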
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp( self ):
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
    def test_create_position_ids_from_inputs_embeds( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_masked_lm( self ):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_no_head( self ):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 718 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
def download_command_factory( args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
def __init__( self , a_ , *a_ ):
lowerCAmelCase : List[str] = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 551 | 0 |
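# Standalone utility: convert a non-negative decimal integer to its octal
# string representation using repeated division by 8.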
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to an octal string."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main():
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()

| 687 |
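# Distillation helper: a torch Dataset wrapping tokenized sequences that are
# checked, split or filtered by length and unknown-token ratio, then padded into batches.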
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps token sequences (and their lengths) for language-model distillation."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 408 | 0 |
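# Unit tests for the text-generation stopping criteria in transformers
# (max length, max new tokens, max wall-clock time, and their validation).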
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 714 |
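# Deprecation shim: keeps the old PerceiverFeatureExtractor name alive as an
# alias of PerceiverImageProcessor while emitting a FutureWarning.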
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 253 | 0 |
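# Lazy-import module table for the Informer time-series model: the configuration
# is always importable, modeling classes only when torch is available.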
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 |
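# Integration tests: verify that dataset infos and prepared data can be fetched
# from the Hugging Face GCP mirror instead of being rebuilt locally.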
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 118 | 1 |
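# Conversion script: load a TensorFlow "token dropping" BERT checkpoint and
# export its weights into a PyTorch BertForMaskedLM.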
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 714 |
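# Pipeline definition: monocular depth estimation. Takes an image, runs a depth
# model, and returns the raw predicted depth tensor plus a PIL depth image.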
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 332 | 0 |
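# Public re-export hub for `datasets` utility symbols; __all__ pins the public surface.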
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 101 |
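# HfArgumentParser: an argparse.ArgumentParser subclass that derives CLI
# arguments from dataclass fields and their type hints.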
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` that builds its arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
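# Minimal usage sketch (illustrative only; `ExampleArguments` is a hypothetical
# dataclass, not part of this module):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         learning_rate: float = 5e-5
#         output_dir: str = "out"
#
#     parser = HfArgumentParser(ExampleArguments)
#     (example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])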
| 9 | 0 |
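# Research example: VQGAN + CLIP image editing. ProcessorGradientFlow re-implements
# CLIP preprocessing with torchvision transforms so gradients can flow through it.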
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    # Differentiable replacement for the CLIP processor: keeps everything as tensors.
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 155 |
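# SageMaker multi-node integration test: launches a HuggingFace estimator for a
# distributed training job and asserts runtime, accuracy, and loss KPIs.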
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 155 | 1 |
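# Tokenizer tests for Salesforce CodeGen: slow/fast parity, padding behavior,
# BOS-token handling, and truncate-before-pattern decoding.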
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 473 | """simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the d < `digit` for which 1/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
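# Illustrative check (assuming the standard Project Euler #26 statement this
# implements): solution() is expected to return 983, the d < 1000 whose unit
# fraction 1/d has the longest recurring cycle in its decimal part.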
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 473 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merged, so neither is a prime implicant on its own
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
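# Illustrative: decimal_to_binary(3, [1.5]) -> ["0.00.01.5"]. Note the float-based
# encoding: each "bit" is the string form of `minterm % 2`, so the float minterms
# that main() reads in with float(x) produce float-looking digits.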
def is_for_table(string1: str, string2: str, count: int) -> bool:
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # mark essential prime implicants: minterm columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms with the row that covers the most
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 395 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    # target query/key/value names below follow the standard HF ViT parameter layout
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
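# Illustrative: rename_key("backbone.blocks.0.attn.proj.weight")
# -> "vit.encoder.layer.0.attention.output.dense.weight"
# (the replacements are applied top to bottom, so "attn.proj" is rewritten
# before the bare "attn" rule can fire).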
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    # target q/k/v key names follow the standard HF ViT layout used by YolosModel
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 395 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # character from the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
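# Illustrative: check_anagrams("Silent", "Listen") -> True, while
# check_anagrams("There", "Their") -> False; case and spaces are ignored.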
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ :Optional[int] = input('Enter the first string ').strip()
a_ :List[str] = input('Enter the second string ').strip()
a_ :Any = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 35 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
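# Illustrative: a 4-cycle (adjacency matrix below) is 2-colorable.
# graph = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
# color(graph, 2) -> [0, 1, 0, 1]; color(graph, 1) -> [] (no valid coloring).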
| 304 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
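# How MAPPING is used: the left-hand fragments are matched against fairseq parameter
# names, and a "*" in the mapped name is replaced with the layer index at load time
# (see recursively_load_weights below).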
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
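# Illustrative: set_recursively(hf_model, "encoder.layers.0.attention.k_proj", value,
# full_name, "weight") walks attribute by attribute down to the k_proj module and
# copies `value` into its .weight.data after the shape check above.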
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the original pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCamelCase : Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 458 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
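# Illustrative: hex_to_bin("AC") -> 10101100 and hex_to_bin("-fF") -> -11111111
# (the binary digits are re-read as a base-10 int, matching the return type).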
if __name__ == "__main__":
import doctest
doctest.testmod()
| 458 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 269 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names and reshape if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
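# Illustrative: a PyTorch Linear "weight" of shape (out_features, in_features)
# becomes a Flax "kernel" and is transposed to (in_features, out_features), while a
# 4D conv "weight" is permuted from OIHW to HWIO via transpose(2, 3, 1, 0) above.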
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
| 269 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
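# Illustrative: BigBirdConfig(attention_type="original_full") switches the model to
# full quadratic attention; `block_size` and `num_random_blocks` only take effect
# with the default "block_sparse" attention path.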
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 714 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
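    # Note: the deliberately tiny Swin backbone (depth 1 per stage) and the 128-dim
    # DETR decoder above keep these common tests fast while still exercising the
    # pixel decoder and transformer decoder end to end.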
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
def __lowercase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : Dict = self.all_model_classes[1]
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
_lowercase : Optional[Any] = True
_lowercase : Optional[int] = True
_lowercase : Any = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
_lowercase : Optional[Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
_lowercase : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowercase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowercase : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowercase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def _SCREAMING_SNAKE_CASE( ) ->Tuple:
'''simple docstring'''
_lowercase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowercase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __lowercase ( self : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(UpperCamelCase_ )
_lowercase : Optional[Any] = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : str = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowercase : Dict = model(**UpperCamelCase_ )
_lowercase : Any = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
_lowercase : List[str] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
_lowercase : List[str] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def __lowercase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(UpperCamelCase_ )
.eval()
)
_lowercase : Optional[int] = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Dict = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowercase : Optional[Any] = model(**UpperCamelCase_ )
# masks_queries_logits
_lowercase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowercase : int = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_lowercase : List[str] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
_lowercase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowercase : Tuple = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def __lowercase ( self : Any ) -> List[str]:
'''simple docstring'''
_lowercase : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(UpperCamelCase_ )
.eval()
)
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_img()
_lowercase : Dict = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowercase : Optional[int] = model(**UpperCamelCase_ )
# masks_queries_logits
_lowercase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowercase : List[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_lowercase : Dict = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
_lowercase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowercase : List[str] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def __lowercase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(UpperCamelCase_ )
.eval()
)
_lowercase : List[str] = self.default_image_processor
_lowercase : Any = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
_lowercase : Optional[int] = inputs['''pixel_values'''].to(UpperCamelCase_ )
_lowercase : int = [el.to(UpperCamelCase_ ) for el in inputs['''mask_labels''']]
_lowercase : Any = [el.to(UpperCamelCase_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowercase : List[str] = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
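# Minimal sketch (not part of the original test file): the gradient-retention test
# above relies on the fact that non-leaf tensors drop their .grad after backward()
# unless retain_grad() is called on them first.
def _retain_grad_demo():
    x = torch.randn(2, 3, requires_grad=True)
    hidden = x * 2  # intermediate (non-leaf) tensor
    hidden.retain_grad()
    hidden.sum().backward()
    assert hidden.grad is not None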
| 411 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
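# Illustrative sketch (not from the original file): a stripped-down version of the
# lazy-module pattern above. Attribute access triggers the real import, so the
# package import itself stays cheap until a class is actually needed.
import importlib
class _TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)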
| 608 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 608 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def __lowerCAmelCase ( x : float , mu : float = 0.0 , sigma : float = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
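# Usage sketch (not part of the original file; `__lowerCAmelCase` is the mangled
# name of the Gaussian density defined above): at x == mu the density peaks at
# 1 / sqrt(2 * pi * sigma**2).
def _peak_value_check():
    assert abs(__lowerCAmelCase(0.0) - 1 / sqrt(2 * pi)) < 1e-12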
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    '''simple docstring'''
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class _lowerCAmelCase ( PreTrainedModel ):
    """simple docstring"""
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']
    def __init__( self , config ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(1_7 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
@torch.no_grad()
    def UpperCAmelCase__ ( self , clip_input , images ) -> Any:
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['''special_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img['''special_scores'''][concept_idx] > 0:
                    result_img['''special_care'''].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['''concept_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img['''concept_scores'''][concept_idx] > 0:
                    result_img['''bad_concepts'''].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def UpperCAmelCase__ ( self , clip_input , images ) -> Tuple:
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
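# Self-contained sketch (not part of the original module): the cosine_distance
# helper above returns pairwise cosine similarities, so every entry lies in
# [-1, 1] and the output shape is (num_images, num_concepts).
def _cosine_distance_demo():
    image_embeds = torch.randn(2, 8)
    concept_embeds = torch.randn(3, 8)
    sims = cosine_distance(image_embeds, concept_embeds)
    assert sims.shape == (2, 3)
    assert torch.all(sims.abs() <= 1.0 + 1e-6)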
| 21 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Optional[int] = BioGptTokenizer
_lowercase : List[Any] = False
def lowerCAmelCase_ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__A : Tuple = dict(zip(__A , range(len(__A ) ) ) )
__A : List[str] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def lowerCAmelCase_ ( self : Optional[Any] , __A : List[Any] ):
__A : Optional[Any] = """lower newer"""
__A : Optional[Any] = """lower newer"""
return input_text, output_text
def lowerCAmelCase_ ( self : Dict ):
__A : int = BioGptTokenizer(self.vocab_file , self.merges_file )
__A : Any = """lower"""
__A : str = ["""low""", """er</w>"""]
__A : Optional[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
__A : Any = tokens + ["""<unk>"""]
__A : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def lowerCAmelCase_ ( self : Tuple ):
__A : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__A : str = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__A : Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__A : Optional[int] = tokenizer.build_inputs_with_special_tokens(__A )
__A : Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
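# Illustrative sketch (not part of the original test file): a minimal greedy BPE
# merge loop showing how the merge table written to `merges_file` above turns
# "lower" into ["low", "er</w>"], matching the expectation in the test.
def _apply_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    # each merge line is "left right frequency"; rank = position in the file
    ranks = {tuple(m.split()[:2]): i for i, m in enumerate(merges) if m}
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        mergeable = [p for p in pairs if p in ranks]
        if not mergeable:
            break
        best = min(mergeable, key=ranks.get)
        i = pairs.index(best)
        symbols = symbols[:i] + [best[0] + best[1]] + symbols[i + 2 :]
    return symbols
assert _apply_bpe("lower", ["l o 123", "lo w 1456", "e r</w> 1789"]) == ["low", "er</w>"]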
| 17 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : List[str] , a_ : Tuple=3 , a_ : Any=7 , a_ : Any=True , a_ : Union[str, Any]=True , a_ : Tuple=False , a_ : Optional[int]=True , a_ : Any=99 , a_ : Dict=32 , a_ : Dict=5 , a_ : List[Any]=4 , a_ : Any=37 , a_ : Any="gelu" , a_ : List[str]=0.1 , a_ : Dict=0.1 , a_ : Optional[Any]=512 , a_ : List[Any]=16 , a_ : Any=2 , a_ : str=0.02 , a_ : Any=3 , a_ : List[Any]=4 , a_ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def A ( self : Any ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a_ , )
def A ( self : List[str] , a_ : Dict , a_ : Tuple , a_ : Optional[Any] , a_ : Dict , a_ : Dict , a_ : Dict , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = FalconModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Any , a_ : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = True
__snake_case = FalconModel(a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
__snake_case = model(a_ , attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] , a_ : int , a_ : int , a_ : List[Any] , a_ : str , a_ : List[str] , a_ : str , a_ : str , a_ : Union[str, Any] , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : str , a_ : Tuple , a_ : str , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , a_ : Dict , ):
"""simple docstring"""
__snake_case = True
__snake_case = True
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0]
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FalconForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = FalconModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , *__snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__snake_case = alibi
self.model_tester.create_and_check_model(a_ , *a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "single_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = input_dict["input_ids"]
__snake_case = FalconForCausalLM(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , use_cache=a_ )
__snake_case = input_ids.shape[0]
__snake_case = model._convert_to_rw_cache(result.past_key_values )
__snake_case = model._convert_cache_to_standard_format(a_ , a_ )
for layer in range(len(a_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "multi_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Dict ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a_ , "use_cache" ):
return
__snake_case = model_class(a_ ).to(a_ )
if "use_cache" not in inputs:
__snake_case = True
__snake_case = model(**a_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__snake_case = (
getattr(a_ , "decoder_layers" , a_ )
or getattr(a_ , "num_decoder_layers" , a_ )
or config.num_hidden_layers
)
__snake_case = getattr(a_ , "num_kv_heads" , config.num_attention_heads )
__snake_case = getattr(a_ , "d_model" , config.hidden_size )
__snake_case = embed_dim // num_attention_heads
__snake_case = outputs["past_key_values"]
self.assertEqual(len(a_ ) , a_ )
__snake_case , __snake_case = inputs["input_ids"].shape
for i in range(a_ ):
if config.new_decoder_architecture:
__snake_case = config.num_attention_heads
elif config.multi_query:
__snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
__snake_case = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
__snake_case = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=19 )
__snake_case = tokenizer.batch_decode(a_ )[0]
self.assertEqual(a_ , a_ )
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , num_beams=2 , max_new_tokens=4 )
@slow
def A ( self : Any ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(device=a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# Test results are the same with and without cache
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
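# Back-of-envelope sketch (not part of the original tests): the cache-shape checks
# above expect per-layer K/V tensors of shape
# (batch_size, num_heads, seq_length, head_dim); for multi-query attention the
# head axis collapses to 1.
def _expected_kv_shape(batch_size, num_heads, seq_length, head_dim, multi_query=False):
    heads = 1 if multi_query else num_heads
    return (batch_size, heads, seq_length, head_dim)
assert _expected_kv_shape(2, 8, 16, 64, multi_query=True) == (2, 1, 16, 64)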
| 69 | 0 |
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser ( HTMLParser ):
    def __init__( self , domain : str ):
        '''simple docstring'''
        super().__init__()
        self.urls : list[str] = []
        self.domain = domain
    def handle_starttag( self , tag , attrs ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url : str ) -> str:
    """simple docstring"""
    return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )
def get_sub_domain_name( url : str ) -> str:
    """simple docstring"""
    return parse.urlparse(url ).netloc
def emails_from_url( url : str = "https://github.com" ) -> list[str]:
    """simple docstring"""
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
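# Quick illustration (not part of the original script): the pattern built inside
# emails_from_url matches a local part followed by the target domain. Note the
# unescaped dot matches any character; re.escape(domain) would make it stricter.
def _email_regex_demo():
    sample = "contact [email protected] or [email protected]"
    assert re.findall("[a-zA-Z0-9]+@" + "github.com", sample) == ["[email protected]"]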
| 700 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
    _backends = ['''torch''', '''torchsde''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
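# Minimal sketch (not from the original file) of the dummy-object pattern above:
# the placeholder class imports cleanly, but any attempt to instantiate it raises
# an ImportError naming the missing optional backends.
class _DummySdeScheduler:
    _required = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires the following backends: {', '.join(self._required)}")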
| 514 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 142 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowercase : List[str] = logging.get_logger(__name__)
class BeitFeatureExtractor ( BeitImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 142 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "albert"
    def __init__( self , vocab_size=3_0_0_0_0 , embedding_size=1_2_8 , hidden_size=4_0_9_6 , num_hidden_layers=1_2 , num_hidden_groups=1 , num_attention_heads=6_4 , intermediate_size=1_6_3_8_4 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
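# Illustrative arithmetic (not part of the original file): the separate
# `embedding_size` field above enables ALBERT's factorized embeddings —
# V*E + E*H parameters instead of V*H.
def _factorized_embedding_params(vocab_size, embedding_size, hidden_size):
    return vocab_size * embedding_size + embedding_size * hidden_size
assert _factorized_embedding_params(30_000, 128, 4_096) < 30_000 * 4_096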
| 41 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : str=18 , UpperCamelCase__ : List[Any]=30 , UpperCamelCase__ : Union[str, Any]=4_00 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=True , ) -> Dict:
'''simple docstring'''
UpperCAmelCase = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ ( A__, unittest.TestCase ):
lowercase : Optional[Any] =ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "clusters" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase = os.path.join(UpperCamelCase__ , "image_processor.json" )
image_processor_first.to_json_file(UpperCamelCase__ )
UpperCAmelCase = self.image_processing_class.from_json_file(UpperCamelCase__ ).to_dict()
UpperCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCamelCase__ )
UpperCAmelCase = self.image_processing_class.from_pretrained(UpperCamelCase__ ).to_dict()
UpperCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase__ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_() -> List[Any]:
UpperCAmelCase = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
UpperCAmelCase = Image.open(dataset[4]["file"] )
UpperCAmelCase = Image.open(dataset[5]["file"] )
UpperCAmelCase = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
UpperCAmelCase = prepare_images()
# test non-batched
UpperCAmelCase = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
UpperCAmelCase = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCamelCase__ )
# test batched
UpperCAmelCase = image_processing(UpperCamelCase__ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
UpperCAmelCase = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCamelCase__ )
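# Conceptual sketch (assumption: not the library's actual implementation):
# ImageGPT preprocessing assigns each pixel to its nearest color cluster, which is
# how images become the integer input_ids checked in the tests above.
def _nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3), clusters: (k, 3); returns (n,) cluster indices
    d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return d.argmin(-1)
assert _nearest_cluster_ids(np.zeros((4, 3)), np.asarray([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])).tolist() == [0, 0, 0, 0]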
| 323 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Optional[int]=10 , UpperCamelCase__ : int=3 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=37 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict="divided_space_time" , UpperCamelCase__ : Union[str, Any]=None , ) -> Dict:
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_frames
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = attention_type
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
UpperCAmelCase = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = TimesformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = TimesformerForVideoClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCAmelCase = model(UpperCamelCase__ )
# verify the logits shape
UpperCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( A__, A__, unittest.TestCase ):
lowercase : Optional[Any] =(TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase : Union[str, Any] =(
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase : List[str] =False
lowercase : Any =False
lowercase : Any =False
lowercase : Tuple =False
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = TimesformerModelTester(self )
UpperCAmelCase = ConfigTester(
self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(UpperCamelCase__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TimesformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
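# Note: np.load returns the clip as an array of frames; list(video) hands the
# image processor one frame array per entry (per-frame layout is assumed from
# the video processor convention, not stated in this file).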
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained(
            "facebook/timesformer-base-finetuned-k400").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
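# The @slow integration test above is skipped by default; it only runs when
# slow tests are enabled, e.g. `RUN_SLOW=1 pytest -k video_classification`
# (the test filter shown is illustrative).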
| 323 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__a = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 257 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
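# Example invocation (all paths and the checkpoint name below are placeholders,
# not shipped files):
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted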
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 257 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
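# Minimal usage sketch (the checkpoint id is illustrative; any unconditional
# DDPM/DDIM-style checkpoint whose scheduler config converts to DDIM should work):
# pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# images, noising_step = pipe(init_image, strength=0.6, return_dict=False)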
| 19 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,'
                    ' no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json'
                    ' dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.')
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].')

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist,'
                    f' no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}'
                    ' embeddings.')

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 667 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
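# With the lazy structure above, accessing e.g. `GitModel` on this module
# triggers the torch-heavy import only on first use; importing the package
# itself stays cheap.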
| 3 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'''{src_lang}-{tgt_lang}'''

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 37 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
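# The tester above is a plain helper object (not a TestCase); the test class
# below instantiates it in setUp and drives it through the shared mixin
# machinery imported at the top of the file.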
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"""
                        ),
                    )

            recursive_check(tuple_output, dict_object=dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 142 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=2000,
        snr=0.15,
        sigma_min=0.01,
        sigma_max=1348.0,
        sampling_eps=1e-5,
        correct_steps=1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
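# Sketch of the predictor-corrector sampling loop this scheduler is built for
# (`model` below is a placeholder score network; the exact call pattern is an
# assumption):
# scheduler.set_timesteps(num_inference_steps)
# scheduler.set_sigmas(num_inference_steps)
# for t in scheduler.timesteps:
#     for _ in range(scheduler.config.correct_steps):
#         sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#     output = scheduler.step_pred(model(sample, t).sample, t, sample)
#     sample = output.prev_sample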
| 703 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=32,
        num_vq_embeddings=256,
        norm_num_groups=32,
        vq_embed_dim=None,
        scaling_factor=0.18215,
        norm_type="group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
    @apply_forward_hook
    def encode(self, x, return_dict=True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
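# Minimal round-trip sketch (input size illustrative, weights untrained):
# vq = VQModel()
# recon = vq(torch.randn(1, 3, 32, 32)).sample  # encode -> quantize -> decode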
| 607 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 517 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE_ = "src/transformers"
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE_ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE_ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE_ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*try:")
# Catches a line with else:
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers',
        os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
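# Typically run from the repository root as part of the repo-consistency checks,
# e.g. `python utils/check_inits.py` (exact script path is an assumption).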
| 597 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
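# Illustrative check: for dy/dx = y with y(0) = 1 on [0, 1], the final value
# should approach e ≈ 2.71828 as the step size shrinks.
# ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)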
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple:
    lowest = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 2_0 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-8_0, bounds[0]]), min([8_0, bounds[1]]))
    plt.ylabel('''Gain (dB)''')

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
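# Illustrative usage: any object with a matching `process` method satisfies
# the protocol, e.g. a trivial pass-through "filter":
# class Identity:
#     def process(self, sample: float) -> float:
#         return sample
# show_frequency_response(Identity(), 48000)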
| 215 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 31 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
def lowercase ( self : str ) -> str:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[Any]:
__lowerCAmelCase = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[str]:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[Any]:
__lowerCAmelCase = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : List[str] ) -> List[Any]:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
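# Behavior sketch (added, inferred from the tests above): is_safetensors_compatible
# returns True only when every PyTorch `.bin` weight file in the list has a
# matching `.safetensors` counterpart, e.g.
#     is_safetensors_compatible([
#         'unet/diffusion_pytorch_model.bin',
#         'unet/diffusion_pytorch_model.safetensors',
#     ])  # -> True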
| 53 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 334 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
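    # Note (added): the replicate/shard pair above sets up the usual pmap data
    # layout: `replicate` copies the params pytree to every device, while `shard`
    # reshapes batch arrays from (batch, ...) to
    # (jax.device_count(), batch // jax.device_count(), ...).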
| 334 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    Generic image processor class that is instantiated via
    `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)`.
    """
def __init__( self ) -> Dict:
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
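# Usage sketch (added; the checkpoint name is illustrative):
#     from transformers import AutoImageProcessor
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=pil_image, return_tensors="pt")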
| 381 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
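# Note (added): with the _LazyModule registration above, importing the package
# itself is cheap; the heavy torch/TF submodules are imported only on first
# attribute access, e.g.:
#     from transformers.models.mobilebert import MobileBertModel  # triggers the real import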
| 406 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
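    # Note (added): the slice comparison above checks KV-cache consistency —
    # decoding one new token with cached past_key_values must match a full
    # forward pass over the concatenated sequence.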
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 711 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
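# Example invocation (added; the flag names follow TensorFlowBenchmarkArguments
# and are shown for illustration only):
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128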
if __name__ == "__main__":
main()
| 199 | 0 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
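# Sanity check (added): for a 5p target there are four combinations
# (5p; 2p+2p+1p; 2p+1p+1p+1p; five 1p), so solution(5) == 4.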
if __name__ == "__main__":
print(solution(int(input().strip())))
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
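# Example invocation (added; the paths are placeholders):
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/model.ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin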
| 684 | 1 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string and return its binary form with a 0b/-0b prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
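# Worked examples (added): main("12") -> "0b1100"; main("-5") -> "-0b101".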
if __name__ == "__main__":
from doctest import testmod
testmod()
| 248 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to the CPU execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
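# Usage sketch (added; the repo id and graph input name are placeholders —
# actual input names depend on the exported ONNX graph):
#     unet = OnnxRuntimeModel.from_pretrained("some-org/sd-unet-onnx", file_name="model.onnx")
#     out = unet(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))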
| 248 | 1 |
'''simple docstring'''
def solution():
    """Return the product d(1) * d(10) * d(100) * ... * d(1000000) of digits of
    the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
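# Note (added): this is Project Euler problem 40; the product
# d(1) * d(10) * ... * d(1000000) of the Champernowne digits is 210.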
if __name__ == "__main__":
print(solution()) | 578 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 578 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text as an image strip and paste it above `image`."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor: optionally renders header text onto
    the image (VQA mode) and converts images into flattened patch sequences.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Standardize with statistics computed over the whole image (per-image standardization)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 702 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards-breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: Flax convolutions expect channels-last layout.
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
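A minimal usage sketch of the model above (added for illustration; not part of the original sample). It assumes the class is shipped by diffusers as FlaxUNet2DConditionModel and uses the default config; all shapes and the timestep value are illustrative.

import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

# Instantiate with the default config and initialize parameters from a PRNG key.
unet = FlaxUNet2DConditionModel()
params = unet.init_weights(jax.random.PRNGKey(0))

# One denoising step on a dummy latent batch; shapes follow the default config
# (in_channels=4, sample_size=32, cross_attention_dim=1280).
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)                # (batch, channels, H, W)
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)  # text-encoder features

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, 4, 32, 32)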