| column | type | min | max |
|---|---|---|---|
| code | string (length) | 82 | 53.2k |
| code_codestyle | int64 | 0 | 721 |
| style_context | string (length) | 91 | 41.9k |
| style_context_codestyle | int64 | 0 | 699 |
| label | int64 | 0 | 1 |
"""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."""
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    # Return a closure that tokenizes the `text` column of a batch of examples.
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    # Serialize each tokenized sample into a `tf.train.Example` proto string.
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
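For context, a minimal consumer sketch (not part of the original script) that parses the shards written above with `tf.data`. The fixed-length feature spec assumes the default `--max_length` of 512 and the default `tf-tpu/train` output location; adjust both if you changed the flags.

```python
# Read-back sketch for the TFRecord shards written above (assumes --max_length 512).
import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([512], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
}


def decode_fn(serialized_example):
    # Inverse of get_serialized_examples(): one tf.train.Example per sample.
    return tf.io.parse_single_example(serialized_example, feature_spec)


dataset = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/dataset-*.tfrecord"))
dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE).batch(8)

for batch in dataset.take(1):
    print(batch["input_ids"].shape)  # (8, 512)
```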
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 form a twin prime pair,
    else return -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Solve simultaneous linear equations by repeated elimination."""


def simplify(current_set: list[list]) -> list[list]:
    """
    One elimination step: normalize each row by its leading coefficient,
    subtract the first row to cancel the first column, then recurse on the
    reduced system until rows have three entries left.
    """
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve n simultaneous linear equations, given as n rows of n + 1
    coefficients; the last entry of each row is the constant term.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency version, using the exact same syntax used by pip.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
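A short usage sketch for the two helpers above; the requirement strings are illustrative, but `require_version` and `require_version_core` are the module's real entry points.

```python
# Usage sketch for the version-requirement helpers above.
from transformers.utils.versions import require_version, require_version_core

require_version("tokenizers==0.9.4")   # exact pin
require_version("tqdm>=4.27,<5.0")     # range with multiple constraints
require_version("numpy")               # presence-only check, no version compared
require_version_core("datasets>=1.0")  # same check, with a transformers-specific hint
```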
"""Calculate the Mobius function of a number."""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    # 0 if `number` has a squared prime factor; otherwise (-1) ** k for k
    # distinct prime factors.
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
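For reference, a sketch of the values `mobius` above returns, assuming `prime_factors` returns the factorization with multiplicity and `is_square_free` checks that list for duplicates:

```python
# Expected values for the mobius() function above:
# mobius(1) == 1    (zero prime factors, even count)
# mobius(2) == -1   (one prime factor, odd count)
# mobius(4) == 0    (4 = 2**2 is not square-free)
# mobius(6) == 1    (6 = 2 * 3, two distinct prime factors)
print([mobius(n) for n in (1, 2, 4, 6)])  # [1, -1, 0, 1]
```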
"""Project Euler problem 30: find the sum of all numbers that can be written
as the sum of fifth powers of their digits."""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # Wav2Vec2PhonemeCTCTokenizer returns a dict, not a plain string, from
        # convert_tokens_to_string, so the common test is overridden here.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
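A usage sketch of the round trip these tests exercise; it assumes the `phonemizer` package and its espeak backend are installed, as `@require_phonemizer` above implies.

```python
# Round-trip sketch for the phoneme tokenizer tested above.
from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
print(phonemes)  # "h ə l oʊ h aʊ ɑːɹ j uː"

ids = tokenizer(phonemes, do_phonemize=False).input_ids
print(tokenizer.decode(ids))  # back to the phoneme string
```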
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
"""UniSpeech model configuration"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
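A brief instantiation sketch for the config above. The printed ratio follows from the `inputs_to_logits_ratio` property, which multiplies the default `conv_stride` values; the three `conv_*` tuples must have equal length, as the `ValueError` in `__init__` enforces.

```python
# Instantiation sketch for UniSpeechConfig with its defaults.
from transformers import UniSpeechConfig

config = UniSpeechConfig()  # defaults shown in the signature above
print(config.inputs_to_logits_ratio)  # 5 * 2**6 = 320
```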
"""Feature extractor class for FLAVA."""
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
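As a usage note, a sketch of what the `_LazyModule` indirection above buys: user-facing imports look the same either way, but the heavy submodules only load on first attribute access. Instantiating with defaults here is illustrative.

```python
# Lazy-import sketch: neither submodule is loaded until these names are touched.
from transformers import MaskFormerConfig, MaskFormerImageProcessor

config = MaskFormerConfig()
processor = MaskFormerImageProcessor()
```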
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    # Root-mean-square speed of a gas molecule: vrms = sqrt(3RT/M), with the
    # temperature in kelvin and the molar mass in kg/mol.
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
"""Project Euler problem 14: find the starting number under one million that
produces the longest Collatz chain."""


def solution(limit: int = 1_000_000) -> int:
    # Memoize chain lengths in `counters` as they are discovered.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # A smooth map onto the positive reals, used to constrain raw parameters.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: the negative binomial cannot be scaled by
    # an affine transform of its (integer) samples, so the parameters are scaled instead.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
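

# Hedged usage sketch (editor's addition, not part of the original file): project
# features to Student-T parameters and sample. The feature size (32) and batch
# size (4) are illustrative assumptions.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    features = torch.randn(4, 32)          # (batch, hidden)
    distr_args = projection(features)      # (df, loc, scale), each of shape (4,)
    distribution = output.distribution(distr_args)
    print(distribution.sample().shape)     # torch.Size([4])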
| 258 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
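

# Editor's addition: allow the suite to be run directly as a script.
if __name__ == "__main__":
    unittest.main()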
| 63 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, so that the
    BPE vocabulary can avoid whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
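
# Editor's example (not in the original): on the symbol tuple ("l", "o", "w"),
# get_pairs returns {("l", "o"), ("o", "w")} -- the candidate BPE merges.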
class BartTokenizer(PreTrainedTokenizer):
    """
    Constructs a BART tokenizer, based on byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
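

# Hedged usage sketch (editor's addition, not part of the original file): the
# checkpoint name is a real Hub id, but network access is assumed.
if __name__ == "__main__":
    tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tokenizer("Hello world!")["input_ids"]
    print(tokenizer.decode(ids))  # "<s>Hello world!</s>"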
| 167 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            # `lowerCamelCase__` holds the large expected-encoding literal defined above
            expected_encoding=lowerCamelCase__,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_separate_tokenizers(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
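

# Hedged usage sketch (editor's addition, not part of the test suite): end-to-end
# translation with the checkpoint exercised above; requires network access,
# sentencepiece and torch.
if __name__ == "__main__":
    from transformers import MarianMTModel

    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tokenizer(["I am a small frog"], return_tensors="pt")
    print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))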
| 631 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
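

# Hedged usage sketch (editor's addition): the in-graph tokenizer lets tokenization
# ship inside a single SavedModel, which is what test_saved_model verifies above.
# Requires tensorflow_text to be installed.
if __name__ == "__main__":
    tf_tokenizer = TFBertTokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
    model = ModelToSave(tokenizer=tf_tokenizer)
    print(model(tf.constant(["An example sentence"])).shape)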
| 136 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
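
# Editor's note (not in the original script): this file is meant to be launched
# through the accelerate CLI so each process runs main() under the right backend,
# for example:
#
#   accelerate launch --num_processes 2 test_sync.py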
| 136 | 1 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True if there is at least one value and every value is positive."""
    return len(values) > 0 and all(value > 0.0 for value in values)


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float:
    """Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)."""
    if not validate(molar_mass_1, molar_mass_2):
        raise ValueError("Input Error: Molar mass values must be greater than 0.")
    return round(sqrt(molar_mass_2 / molar_mass_1), 6)


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float:
    """Effusion rate of gas 1, given gas 2's effusion rate and both molar masses."""
    if not validate(effusion_rate, molar_mass_1, molar_mass_2):
        raise ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    return round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float:
    """Effusion rate of gas 2, given gas 1's effusion rate and both molar masses."""
    if not validate(effusion_rate, molar_mass_1, molar_mass_2):
        raise ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    return round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float:
    """Molar mass of gas 1, given gas 2's molar mass and both effusion rates."""
    if not validate(molar_mass, effusion_rate_1, effusion_rate_2):
        raise ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    return round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float:
    """Molar mass of gas 2, given gas 1's molar mass and both effusion rates."""
    if not validate(molar_mass, effusion_rate_1, effusion_rate_2):
        raise ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    # Note: M_2 = M_1 * (rate_1 / rate_2) ** 2; the original divided instead of multiplying.
    return round(pow(effusion_rate_1 / effusion_rate_2, 2) * molar_mass, 6)
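

# Editor's sanity check (not in the original source): by Graham's law, hydrogen
# (~2.016 g/mol) effuses about four times as fast as oxygen (~32 g/mol).
if __name__ == "__main__":
    ratio = effusion_ratio(2.016, 32.0)
    assert 3.9 < ratio < 4.1, ratio
    print(f"H2 effuses {ratio}x as fast as O2")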
| 708 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers, as commonly used with a norm layer
    (e.g., BatchNorm) and an activation layer (e.g., ReLU).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    '''simple docstring'''
    def __init__(self, config , in_channels ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights(self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights(self, module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs ):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
return output
    def forward(self, encoder_hidden_states ):
        """simple docstring"""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
return output
class UperNetFCNHead(nn.Module):
    '''simple docstring'''
    def __init__(self, config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights(self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights(self, module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states ):
        """simple docstring"""
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module ):
        """simple docstring"""
        if isinstance(module , UperNetPreTrainedModel ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
    def init_weights(self ):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing(self, module , value=False ):
        """simple docstring"""
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", UPERNET_START_DOCSTRING, )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    '''simple docstring'''
    def __init__(self, config ):
        """simple docstring"""
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward(self, pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ):
"""simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
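# Illustrative usage sketch for the model above. It assumes the public transformers API
# and the "openmmlab/upernet-convnext-tiny" Hub checkpoint; substitute your own checkpoint
# and a real image in practice.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    image = Image.new("RGB", (512, 512))  # stand-in image; load a real photo in practice
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # forward() upsamples the logits to the input resolution, so an argmax over the class
    # dimension yields a per-pixel segmentation map of shape (batch, height, width).
    segmentation = outputs.logits.argmax(dim=1)
    print(segmentation.shape)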
| 643 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
def lowerCAmelCase__ ( self) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase))))
UpperCamelCase__ : Optional[Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w') as fp:
fp.write(json.dumps(UpperCamelCase))
with open(self.merges_file , 'w') as fp:
fp.write('\n'.join(UpperCamelCase))
def lowerCAmelCase__ ( self , UpperCamelCase) -> Optional[int]:
UpperCamelCase__ : str = 'lower newer'
UpperCamelCase__ : Optional[Any] = 'lower newer'
return input_text, output_text
def lowerCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase__ : Tuple = BioGptTokenizer(self.vocab_file , self.merges_file)
UpperCamelCase__ : str = 'lower'
UpperCamelCase__ : Union[str, Any] = ['low', 'er</w>']
UpperCamelCase__ : int = tokenizer.tokenize(UpperCamelCase)
self.assertListEqual(UpperCamelCase , UpperCamelCase)
UpperCamelCase__ : int = tokens + ['<unk>']
UpperCamelCase__ : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase) , UpperCamelCase)
@slow
def lowerCAmelCase__ ( self) -> Any:
UpperCamelCase__ : Any = BioGptTokenizer.from_pretrained('microsoft/biogpt')
UpperCamelCase__ : Dict = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase)
UpperCamelCase__ : Tuple = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase)
UpperCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase)
UpperCamelCase__ : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
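# Illustrative note: with the toy fixture above, BPE merges "lower" into the learned
# symbols "low" and "er</w>", whose positions in the vocab list are 14 and 15 ("<unk>"
# sits at index 20). A standalone restatement of the expected id mapping:
if __name__ == "__main__":
    toy_vocab = {"low": 14, "er</w>": 15, "<unk>": 20}
    tokens = ["low", "er</w>", "<unk>"]
    assert [toy_vocab[t] for t in tokens] == [14, 15, 20]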
| 410 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
UpperCAmelCase__ : int = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
UpperCAmelCase__ : Any = cvtColor(img, COLOR_BGR2GRAY)
def _lowercase ( ) -> Any:
UpperCamelCase__ : Optional[int] = cn.convert_to_negative(__SCREAMING_SNAKE_CASE )
    # the negative image array should contain at least one non-zero value
assert negative_img.any()
def _lowercase ( ) -> Tuple:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__SCREAMING_SNAKE_CASE , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _lowercase ( ) -> str:
UpperCamelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # the generated Gaussian kernel should be entirely non-zero
assert resp.all()
def _lowercase ( ) -> Tuple:
UpperCamelCase__ : Optional[Any] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # the grayscale image array should be fully non-zero
assert canny_img.all()
UpperCamelCase__ : List[str] = canny.canny(__SCREAMING_SNAKE_CASE )
# assert canny array for at least one True
assert canny_array.any()
def _lowercase ( ) -> Dict:
assert gg.gaussian_filter(__SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all()
def _lowercase ( ) -> Optional[int]:
# laplace diagonals
UpperCamelCase__ : Optional[int] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
UpperCamelCase__ : Tuple = conv.img_convolve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
assert res.any()
def _lowercase ( ) -> List[str]:
assert med.median_filter(__SCREAMING_SNAKE_CASE , 3 ).any()
def _lowercase ( ) -> List[str]:
UpperCamelCase__ , UpperCamelCase__ : int = sob.sobel_filter(__SCREAMING_SNAKE_CASE )
assert grad.any() and theta.any()
def _lowercase ( ) -> Optional[Any]:
UpperCamelCase__ : str = sp.make_sepia(__SCREAMING_SNAKE_CASE , 20 )
assert sepia.all()
def _lowercase ( __SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" ) -> Any:
UpperCamelCase__ : int = bs.Burkes(imread(__SCREAMING_SNAKE_CASE , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowercase ( __SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[Any]:
UpperCamelCase__ : Union[str, Any] = rs.NearestNeighbour(imread(__SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowercase ( ) -> List[str]:
UpperCamelCase__ : Optional[Any] = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
UpperCamelCase__ : List[str] = imread(__SCREAMING_SNAKE_CASE , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : Dict = image[x_coordinate][y_coordinate]
UpperCamelCase__ : str = lbp.get_neighbors_pixel(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase__ : Any = lbp.local_binary_value(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert lbp_image.any()
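# Illustrative concept sketch for the local binary pattern test above: each of a pixel's
# 8 neighbours is compared to the centre value and the comparisons are packed into one
# byte. The helper name and neighbour ordering below are assumptions for illustration,
# not the library's implementation.
def _demo_lbp_value(image, x, y):
    center = image[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < len(image) and 0 <= ny < len(image[0]) and image[nx][ny] >= center:
            value |= 1 << bit
    return value

if __name__ == "__main__":
    demo = [[10, 20, 30], [40, 50, 60], [70, 80, 90]]
    print(_demo_lbp_value(demo, 1, 1))  # 120: bits set where a neighbour >= the centre (50)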
| 410 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
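# Illustrative sketch of the availability-guard pattern above, using only the standard
# library; the helper name is hypothetical (the real checks live in diffusers' utils).
import importlib.util

def _backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

if __name__ == "__main__":
    print("torch+transformers available:", _backend_available("torch") and _backend_available("transformers"))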
| 714 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def __A ( a_ : List[str] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
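# Illustrative invocation, hedged: only --checkpoint_path, --original_config_file and
# --dump_path are required by the parser above; the script name and file names below are
# placeholders, not shipped artifacts:
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors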
| 551 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin ):
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_tool("""text-to-speech""" )
self.tool.setup()
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.tool("""hey""" )
SCREAMING_SNAKE_CASE__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.tool("""hey""" )
SCREAMING_SNAKE_CASE__ = result.to_raw()
self.assertTrue(
torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 6 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        """simple docstring"""
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward(self, encoder_input_tokens , encoder_inputs_mask ):
        """simple docstring"""
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask so padded positions become additive biases
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
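# Illustrative miniature of what get_extended_attention_mask does for the encoder above:
# a (batch, seq_len) padding mask is broadcast to (batch, 1, 1, seq_len) and turned into
# additive biases, so padded positions receive a very large negative score before softmax.
if __name__ == "__main__":
    mask = torch.tensor([[1, 1, 1, 0]])  # 1 = real token, 0 = padding
    extended = (1.0 - mask[:, None, None, :].float()) * torch.finfo(torch.float32).min
    print(extended.shape)  # torch.Size([1, 1, 1, 4])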
| 554 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=18 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=None , ) -> List[Any]:
a_ : List[Any] = size if size is not None else {'''height''': 20, '''width''': 20}
a_ : Optional[int] = parent
a_ : List[str] = batch_size
a_ : List[str] = num_channels
a_ : str = image_size
a_ : Any = min_resolution
a_ : Dict = max_resolution
a_ : List[str] = size
a_ : str = do_normalize
a_ : Optional[Any] = do_convert_rgb
a_ : Any = [512, 1024, 2048, 4096]
a_ : Dict = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
a_ : Tuple = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
a_ : List[str] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        self.image_processor_tester = PixaStructImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Union[str, Any] = self.image_processor_tester.prepare_dummy_image()
a_ : Tuple = self.image_processing_class(**self.image_processor_dict )
a_ : int = 2048
a_ : Tuple = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# Initialize image_processor
a_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : List[Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# Initialize image_processor
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a_ : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : str = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
a_ : str = '''Hello'''
a_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : str = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
# Initialize image_processor
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Union[str, Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# Initialize image_processor
a_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
a_ : Any = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Tuple = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
a_ : Tuple = 3
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processor
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Dict = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
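# Illustrative note: the expected_hidden_dim arithmetic used throughout these tests,
# spelled out. Every flattened patch carries patch_height * patch_width * num_channels
# pixel values plus two extra slots for the patch's row and column index.
if __name__ == "__main__":
    patch_h, patch_w, num_channels = 16, 16, 3
    print(patch_h * patch_w * num_channels + 2)  # 770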
| 702 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
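# Illustrative sketch, hedged: the worker-sharding test above implies a round-robin
# assignment in which worker w of n workers reads partitions w, w + n, w + 2n, ...
# The helper name below is assumed for illustration only.
def _shard_partitions(num_partitions, worker_id, num_workers):
    return list(range(worker_id, num_partitions, num_workers))

assert _shard_partitions(4, 0, 2) == [0, 2]
assert _shard_partitions(4, 1, 2) == [1, 3]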
| 666 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset ):
def __init__( self : Optional[int] , a : Optional[Any]="" , a : str="train" )-> int:
"""simple docstring"""
assert os.path.isdir(a )
lowercase__ = []
lowercase__ = os.listdir(a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowercase__ = os.path.join(a , a )
if not os.path.isfile(a ):
continue
self.documents.append(a )
def __len__( self : Dict )-> Optional[Any]:
"""simple docstring"""
return len(self.documents )
def __getitem__( self : Union[str, Any] , a : Dict )-> str:
"""simple docstring"""
lowercase__ = self.documents[idx]
lowercase__ = document_path.split('/' )[-1]
with open(a , encoding='utf-8' ) as source:
lowercase__ = source.read()
lowercase__ , lowercase__ = process_story(a )
return document_name, story_lines, summary_lines
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = list(filter(lambda _SCREAMING_SNAKE_CASE : len(_SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowercase__ = [_add_missing_period(_SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
lowercase__ = []
lowercase__ = deque(_SCREAMING_SNAKE_CASE )
while True:
try:
lowercase__ = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_SCREAMING_SNAKE_CASE )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('@highlight' ) , lines ) )
return story_lines, summary_lines
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
if len(_SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_SCREAMING_SNAKE_CASE )) )
return sequence
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = torch.ones_like(_SCREAMING_SNAKE_CASE )
lowercase__ = sequence == pad_token_id
lowercase__ = 0
return mask
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in story_lines]
lowercase__ = [token for sentence in story_lines_token_ids for token in sentence]
lowercase__ = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in summary_lines]
lowercase__ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = []
for sequence in batch:
lowercase__ = -1
lowercase__ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_SCREAMING_SNAKE_CASE )
return torch.tensor(_SCREAMING_SNAKE_CASE )
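# Illustrative restatement of the two small helpers above (their original names are lost
# in this dump): clamp a token sequence to a fixed block size by truncating or
# right-padding, then mask out the pad positions.
if __name__ == "__main__":
    pad_id, block_size = 0, 6
    seq = [5, 6, 7]
    padded = seq[:block_size] if len(seq) > block_size else seq + [pad_id] * (block_size - len(seq))
    attention = [0 if tok == pad_id else 1 for tok in padded]
    print(padded, attention)  # [5, 6, 7, 0, 0, 0] [1, 1, 1, 0, 0, 0]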
| 235 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowercase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowercase__ = Image.open(_SCREAMING_SNAKE_CASE )
return im.convert('RGB' )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
_UpperCamelCase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase )} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_UpperCamelCase : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = torch.stack([example['pixel_values'] for example in examples] )
lowercase__ = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __UpperCamelCase () -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ = {}
if data_args.train_dir is not None:
lowercase__ = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowercase__ = os.path.join(data_args.validation_dir , '**' )
lowercase__ = load_dataset(
'imagefolder' , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
lowercase__ = dataset['train'].train_test_split(data_args.train_val_split )
lowercase__ = split['train']
lowercase__ = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ = dataset['train'].features['labels'].names
lowercase__ , lowercase__ = {}, {}
for i, label in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = str(_SCREAMING_SNAKE_CASE )
lowercase__ = label
# Load the accuracy metric from the datasets package
lowercase__ = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase__ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size['shortest_edge']
else:
lowercase__ = (image_processor.size['height'], image_processor.size['width'])
lowercase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ = Compose(
[
RandomResizedCrop(_SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase__ = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase__ = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_SCREAMING_SNAKE_CASE )
    # Initialize our trainer
lowercase__ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
lowercase__ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
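# Illustrative invocation, hedged: assuming this script is saved as
# run_image_classification.py (its upstream name), a typical fine-tuning run on a small
# public Hub dataset could look like this; every flag below maps to a field declared in
# the dataclasses above or in TrainingArguments:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --do_train --do_eval \
#       --learning_rate 2e-5 \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8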
| 235 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig ):
    model_type = '''data2vec-audio'''
def __init__( self :Dict , __UpperCamelCase :Optional[int]=32 , __UpperCamelCase :int=7_68 , __UpperCamelCase :Tuple=12 , __UpperCamelCase :Any=12 , __UpperCamelCase :Tuple=30_72 , __UpperCamelCase :int="gelu" , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Any=0.1 , __UpperCamelCase :Tuple=0.0 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :Any=1e-5 , __UpperCamelCase :str="gelu" , __UpperCamelCase :int=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase :Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase :Optional[int]=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase :str=False , __UpperCamelCase :Union[str, Any]=16 , __UpperCamelCase :Union[str, Any]=19 , __UpperCamelCase :Union[str, Any]=5 , __UpperCamelCase :str=0.05 , __UpperCamelCase :Union[str, Any]=10 , __UpperCamelCase :Any=2 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=10 , __UpperCamelCase :Tuple=0 , __UpperCamelCase :Dict="sum" , __UpperCamelCase :int=False , __UpperCamelCase :Dict=False , __UpperCamelCase :Tuple=2_56 , __UpperCamelCase :Dict=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase :Union[str, Any]=(5, 3, 3, 1, 1) , __UpperCamelCase :Dict=(1, 2, 3, 1, 1) , __UpperCamelCase :Tuple=5_12 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=1 , __UpperCamelCase :str=2 , __UpperCamelCase :Any=False , __UpperCamelCase :Any=3 , __UpperCamelCase :Dict=2 , __UpperCamelCase :str=3 , __UpperCamelCase :List[Any]=None , **__UpperCamelCase :List[Any] , ):
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
A = hidden_size
A = feat_extract_activation
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = conv_bias
A = num_conv_pos_embeddings
A = num_conv_pos_embedding_groups
A = conv_pos_kernel_size
A = len(self.conv_dim )
A = num_hidden_layers
A = intermediate_size
A = hidden_act
A = num_attention_heads
A = hidden_dropout
A = attention_dropout
A = activation_dropout
A = feat_proj_dropout
A = final_dropout
A = layerdrop
A = layer_norm_eps
A = initializer_range
A = vocab_size
A = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A = mask_time_prob
A = mask_time_length
A = mask_time_min_masks
A = mask_feature_prob
A = mask_feature_length
A = mask_feature_min_masks
# ctc loss
A = ctc_loss_reduction
A = ctc_zero_infinity
# adapter
A = add_adapter
A = adapter_kernel_size
A = adapter_stride
A = num_adapter_layers
A = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
return math.prod(self.conv_stride )
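# Usage sketch: in the released `transformers` package this configuration ships as
# `Data2VecAudioConfig`, and the property above is exposed as `inputs_to_logits_ratio`
# (the cumulative stride of the convolutional feature extractor).
if __name__ == "__main__":
    from transformers import Data2VecAudioConfig

    cfg = Data2VecAudioConfig()
    print(cfg.inputs_to_logits_ratio)  # math.prod((5, 2, 2, 2, 2, 2, 2)) == 320 samples per frame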
| 524 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
A = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase )
A = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A = 1_024
A = 4_096
A = 24
A = 16
A = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A = False
A = "relu"
A = 1_024
A = True
A = False
A = False
# load HuggingFace model
A = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase )
A = TrOCRForCausalLM(UpperCamelCase )
A = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
A = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , check_hash=UpperCamelCase )["model"]
A = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A = state_dict.pop(UpperCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A = val
else:
A = val
# load state dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image
A = ViTImageProcessor(size=encoder_config.image_size )
A = RobertaTokenizer.from_pretrained("roberta-large" )
A = TrOCRProcessor(UpperCamelCase , UpperCamelCase )
A = processor(images=prepare_img(UpperCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase )
A = outputs.logits
A = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
A = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
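# After conversion, standard TrOCR inference looks like this (documented transformers
# usage; the hub checkpoint name is the published one, not an artifact of this script):
#
#   processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
#   model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
#   pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
#   generated_ids = model.generate(pixel_values)
#   text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]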
| 524 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='''utf_8''') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
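# Tiny smoke test for pre_process_datasets (the token ids are made up; caps are
# chosen so nothing is truncated):
#
#   dataset = [([1, 2], [3], [4], 0)]  # (story, cont1, cont2, mc_label)
#   (tensors,) = pre_process_datasets([dataset], input_len=10, cap_length=5,
#                                     start_token=90, delimiter_token=91, clf_token=92)
#   input_ids, mc_token_ids, lm_labels, mc_labels = tensors
#   assert input_ids.shape == (1, 2, 10) and mc_labels.tolist() == [0]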
def main():
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
    __a : int = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
    main()
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nezha'''] = [
        '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NezhaForNextSentencePrediction''',
        '''NezhaForMaskedLM''',
        '''NezhaForPreTraining''',
        '''NezhaForMultipleChoice''',
        '''NezhaForQuestionAnswering''',
        '''NezhaForSequenceClassification''',
        '''NezhaForTokenClassification''',
        '''NezhaModel''',
        '''NezhaPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
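# Note on the pattern above: `_LazyModule` replaces this module in `sys.modules`,
# so importing the package stays cheap and the torch-backed classes listed in
# `_import_structure` are only resolved on first attribute access. For example
# (a sketch, using the released transformers package):
#   from transformers import NezhaModel   # this line triggers the real import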
| 704 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
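    # Quick reference for the toy WordPiece vocab written in setUp (mirrors
    # test_full_tokenizer above):
    #   "UNwant\u00E9d,running" -> ["un", "##want", "##ed", ",", "runn", "##ing"]
    #   ids: [7, 4, 5, 10, 8, 9] (positions in the vocab_tokens list)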
| 247 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = '''microsoft/speecht5_tts'''
    description = (
        '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
        '''text to read (in English) and returns a waveform object containing the sound.'''
    )
    name = '''text_reader'''
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ['''text''']
    outputs = ['''audio''']
    def setup(self):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='''pt''', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''')

            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''', split='''validation''')
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]['''xvector''']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
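    # Usage sketch (mirrors how PipelineTool subclasses are invoked; it downloads
    # the SpeechT5 checkpoints on first call, hence shown as a comment):
    #
    #   tool = TextToSpeechTool()
    #   waveform = tool("Hello world")  # __call__ chains encode -> forward -> decode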
| 506 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('''int() can\'t convert non-string with explicit base''')
    if num < 0:
        raise ValueError('''parameter must be positive int''')
    if isinstance(num, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if isinstance(base, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if base in (0, 1):
        raise ValueError('''base must be >= 2''')
    if base > 36:
        raise ValueError('''base must be <= 36''')
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
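# Spot checks consistent with the doctest-style loop above:
#   decimal_to_any(255, 16) == "FF"
#   decimal_to_any(5, 2) == "101"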
| 506 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
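    # Example invocation (the script name is a placeholder; the flags come from
    # the argparse definition above):
    #   python convert_blenderbot_checkpoint.py \
    #       --src_path blenderbot-model.bin \
    #       --save_dir hf_blenderbot \
    #       --hf_config_json blenderbot-3b-config.json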
| 471 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
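    # Hand-checked results for the graph above (show_min returns the distance,
    # so wrap the calls in print() to display them):
    #   graph.show_min(1, 4) -> 11  (1 -> 3 -> 4: 5 + 6)
    #   graph.show_min(0, 3) -> 16  (0 -> 2 -> 3: 9 + 7)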
| 471 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
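    # Example invocation (paths are placeholders):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #       --albert_config_file ./albert_base/albert_config.json \
    #       --pytorch_dump_path ./albert_base/pytorch_model.bin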
| 48 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(Rf'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 312 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
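    # Sanity check of the key mapping, hand-traced through PATTERNS above:
    #   rename_state_dict_key("encoder.attention.q_lin.weight")
    #     -> "encoder.self_attn.q_proj.weight"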
| 702 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
    # adding vertices and edges; adding the weight is optional and handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; if c is -1 it randomly adds up to 10010 nodes
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 1_0000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
    def has_cycle(self):
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
    # adding vertices and edges; adding the weight is optional and handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; if c is -1 it randomly adds up to 10010 nodes
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 1_0000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
    def has_cycle(self):
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
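# Minimal usage sketch for the directed class above (hand-checked on a 3-node chain):
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.dfs(0, 2)   # -> [0, 1, 2]
#   g.bfs(0)      # -> [0, 1, 2]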
| 75 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )

    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(args )
if __name__ == "__main__":
main()
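    # Typical invocations once installed (the subcommands are registered above):
    #   accelerate config
    #   accelerate env
    #   accelerate launch train_script.py   # train_script.py is a placeholder
    #   accelerate test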
| 43 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
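
# A generic version of the shape-checked copy performed by check_and_map_params
# can be useful in any cross-framework conversion. Standalone sketch (names are
# illustrative, not part of the script above):
def copy_param_checked(hf_param, source_array):
    assert tuple(hf_param.shape) == tuple(source_array.shape), (
        f"shape mismatch: {tuple(hf_param.shape)} vs {tuple(source_array.shape)}"
    )
    with torch.no_grad():
        hf_param.copy_(torch.from_numpy(source_array))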
| 615 | 0 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
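
# Worked example: with an initial intensity of 100.0 and a polarizer-analyzer
# angle of 60 degrees, the transmitted intensity is 100 * cos^2(60°) = 25.0.
assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9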
if __name__ == "__main__":
import doctest
    doctest.testmod(name="malus_law")
 | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 621 | 1 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
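
# A few illustrative calls (expected results follow from the regex above):
assert indian_phone_validator("+91-8827897895")  # "+91-" prefix is allowed
assert indian_phone_validator("9876543210")  # plain 10-digit number
assert not indian_phone_validator("123456789")  # must start with 7, 8 or 9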
| 566 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Iterate through the array from both ends to find the index of key,
    using recursion. Returns -1 if the key is not found."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
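
# Usage sketch: the function scans from both ends, one step per recursive call.
assert search([1, 0, 0, 5], 5) == 3  # found at the right cursor immediately
assert search([1, 0, 0, 5], 7) == -1  # cursors cross without a match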
| 566 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
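
# For context, a simplified sketch of how a DummyObject-style metaclass can
# work (the real diffusers implementation may differ in details): any class
# attribute access re-raises the missing-backend error.
class _DummyObjectSketch(type):
    def __getattr__(cls, key):
        if key == "_backends":  # avoid infinite recursion on the lookup below
            raise AttributeError(key)
        requires_backends(cls, cls._backends)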
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
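
# ImageGPT has no continuous pixel embeddings: each (normalized) RGB pixel is
# mapped to the id of its nearest cluster centroid, and those ids are the
# input_ids checked above. A simplified sketch of that quantization step (the
# real processor also resizes and rescales the image first):
def _color_quantize_sketch(pixels, clusters):
    # pixels: (num_pixels, 3) array, clusters: (num_clusters, 3) array
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=1)  # one token id per pixel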
| 675 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''

_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 227 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sqlite_file = iter_sql_file(sqlite_path)
    expected_sqlite_file = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sqlite_file, expected_sqlite_file):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sqlite_file = iter_sql_file(sqlite_path)
    expected_sqlite_file = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sqlite_file, expected_sqlite_file):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 227 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
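
# A plausible post-conversion smoke test using the standard transformers API
# (the dump path is a placeholder and the threshold is illustrative):
def smoke_test_converted_model(dump_path):
    processor = ConditionalDetrImageProcessor.from_pretrained(dump_path)
    model = ConditionalDetrForObjectDetection.from_pretrained(dump_path)
    image = prepare_img()
    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    results = processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=[image.size[::-1]]
    )
    print(results[0]["labels"], results[0]["scores"])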
| 609 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 609 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
_snake_case = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 655 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # set the first element as head, then append the rest in order
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
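
# The recursive print_reverse above uses O(n) call-stack depth; a minimal
# iterative alternative (not part of the original module) collects the values
# first and prints them in reverse:
def print_reverse_iterative(head_node: Node) -> None:
    values = []
    while head_node is not None:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)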
| 655 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
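
# For context, a minimal sketch of loading the extracted weights into a
# DistilBERT student (the 6-layer config below is an assumption matching the
# six teacher layers selected above, not part of this script):
def load_student_sketch(checkpoint_path):
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    student.load_state_dict(torch.load(checkpoint_path), strict=False)
    return student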
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case : Dict = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
snake_case , snake_case : int = torchaudio.load(__magic_name__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : List[str] = self.default_feature_extractor
snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
snake_case : str = self.default_feature_extractor
snake_case , snake_case : int = prepare_audio()
snake_case : Optional[int] = audio.squeeze().numpy()
snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
# verify the logits
snake_case : Any = torch.Size((1, 527) )
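        # 527 = number of classes in the AudioSet ontology this checkpoint was fine-tuned on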
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Union[str, Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
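# Note (hedged): `_LazyModule` defers the imports listed in the structure above, so
# importing this package is cheap and e.g. `blip.BlipProcessor` only triggers the
# `processing_blip` import on first attribute access.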
| 527 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : Union[str, Any]=18 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : Any=4_00 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Any = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
SCREAMING_SNAKE_CASE : List[Any] = do_thumbnail
SCREAMING_SNAKE_CASE : Union[str, Any] = do_align_axis
SCREAMING_SNAKE_CASE : Tuple = do_pad
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase_ ( snake_case_ , unittest.TestCase ):
_lowerCAmelCase : Any = DonutImageProcessor if is_vision_available() else None
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DonutImageProcessingTester(self )
@property
def __lowercase ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
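        # i.e. a size tuple is read as (width, height) and normalized to the dict form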
def __lowercase ( self : int ):
"""simple docstring"""
pass
@is_flaky()
def __lowercase ( self : int ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __lowercase ( self : List[Any] ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 527 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : Optional[int] = 3
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
lowerCamelCase_ : Dict = random.randrange(3 , __UpperCAmelCase )
if pow(__UpperCAmelCase , 2 , __UpperCAmelCase ) == 1:
continue
if pow(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) == 1:
continue
return g
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
print('''Generating prime p...''' )
lowerCamelCase_ : str = rabin_miller.generate_large_prime(__UpperCAmelCase ) # select large prime number.
lowerCamelCase_ : Optional[Any] = primitive_root(__UpperCAmelCase ) # one primitive root on modulo p.
    lowerCamelCase_ : Tuple = random.randrange(3 , __UpperCAmelCase ) # private_key -> has to be greater than 2 for safety.
lowerCamelCase_ : Tuple = cryptomath.find_mod_inverse(pow(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
lowerCamelCase_ : Optional[int] = (key_size, e_a, e_a, p)
lowerCamelCase_ : Optional[Any] = (key_size, d)
return public_key, private_key
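# Hedged sketch (not part of the original module): how a key pair from generate_key()
# could drive textbook ElGamal. `elgamal_encrypt` / `elgamal_decrypt` are hypothetical
# helper names. The public key is (key_size, g, h, p) with h = inverse(g^d) mod p,
# so decryption multiplies by c1^d to cancel the g^(-d*k) factor.
def elgamal_encrypt(public_key, message):
    _key_size, g, h, p = public_key
    k = random.randrange(3, p)  # fresh ephemeral key per message
    return pow(g, k, p), (message * pow(h, k, p)) % p
def elgamal_decrypt(private_key, p, ciphertext):
    _key_size, d = private_key
    c_a, c_b = ciphertext
    return (c_b * pow(c_a, d, p)) % p  # m * g^(-d*k) * g^(d*k) = m (mod p)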
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = generate_key(__UpperCAmelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , '''w''' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , '''w''' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def __snake_case ():
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 418 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : Tuple , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 418 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a , __a : List[str] = analyze_text(snake_case__ )
__a : Dict = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
__a : Dict = sum(single_char_strings.values() )
    # entropy over single-character strings
__a : Any = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
__a : Dict = single_char_strings[ch]
__a : Optional[int] = my_str / all_sum
my_fir_sum += prob * math.loga(snake_case__ ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # entropy over two-character strings
__a : List[Any] = sum(two_char_strings.values() )
__a : Any = 0
    # for each two-character sequence over the alphabet, add its entropy contribution
    for cha in my_alphas:
        for chb in my_alphas:
            __a : int = cha + chb
if sequence in two_char_strings:
__a : List[Any] = two_char_strings[sequence]
__a : Optional[Any] = int(snake_case__ ) / all_sum
my_sec_sum += prob * math.loga(snake_case__ )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : int = Counter() # type: ignore
__a : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # treat the text as starting with an implicit leading space for the first bigram.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(snake_case__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase ():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 476 |
import math
def UpperCamelCase_( snake_case__: float , snake_case__: float ) -> float:
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def UpperCamelCase_( snake_case__: float , snake_case__: float ) -> float:
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
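# Hedged worked example: apparent_power=100 VA at power_factor=0.8 gives
# real power = 100 * 0.8 = 80 W and reactive power = 100 * sqrt(1 - 0.8**2) = 60 VAR,
# consistent with the power triangle 80**2 + 60**2 == 100**2.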
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = """▁"""
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
_SCREAMING_SNAKE_CASE = {"""vinai/bartpho-syllable""": 1_024}
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Tuple = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Union[str, Any]="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : List[Any]="</s>" , snake_case_ : Optional[int]="<s>" , snake_case_ : List[str]="<unk>" , snake_case_ : Optional[Any]="<pad>" , snake_case_ : List[str]="<mask>" , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
__snake_case = vocab_file
__snake_case = monolingual_vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__snake_case = {}
__snake_case = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(snake_case_ ) not in self.fairseq_tokens_to_ids:
__snake_case = cnt
cnt += 1
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
__snake_case = line.strip().split()[0]
__snake_case = len(self.fairseq_tokens_to_ids )
if str(snake_case_ ) not in self.fairseq_tokens_to_ids:
__snake_case = len(self.fairseq_tokens_to_ids )
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
__snake_case = self.__dict__.copy()
__snake_case = None
__snake_case = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , snake_case_ : Tuple ):
__snake_case = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
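        # Resulting layout (XLM-R style): single sequence -> <s> A </s>; pair -> <s> A </s></s> B </s>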
def lowerCAmelCase ( self : int , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def lowerCAmelCase ( self : int , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase ( self : int ):
return len(self.fairseq_ids_to_tokens )
def lowerCAmelCase ( self : Any ):
__snake_case = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Optional[int] , snake_case_ : str ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowerCAmelCase ( self : Dict , snake_case_ : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCAmelCase ( self : List[str] , snake_case_ : str ):
return self.fairseq_ids_to_tokens[index]
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : Any ):
__snake_case = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def lowerCAmelCase ( self : Any , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__snake_case = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
snake_case_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , snake_case_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(snake_case_ , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(snake_case_ )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 614 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : torch.FloatTensor
_SCREAMING_SNAKE_CASE : torch.FloatTensor
class __magic_name__ ( lowercase__ , lowercase__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = 1
@register_to_config
def __init__( self : List[Any] , snake_case_ : int = 2000 , snake_case_ : float = 0.15 , snake_case_ : float = 0.01 , snake_case_ : float = 1348.0 , snake_case_ : float = 1e-5 , snake_case_ : int = 1 , ):
# standard deviation of the initial noise distribution
__snake_case = sigma_max
# setable values
__snake_case = None
self.set_sigmas(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : torch.FloatTensor , snake_case_ : Optional[int] = None ):
return sample
def lowerCAmelCase ( self : Any , snake_case_ : int , snake_case_ : float = None , snake_case_ : Union[str, torch.device] = None ):
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__snake_case = torch.linspace(1 , snake_case_ , snake_case_ , device=snake_case_ )
def lowerCAmelCase ( self : Dict , snake_case_ : int , snake_case_ : float = None , snake_case_ : float = None , snake_case_ : float = None ):
__snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
__snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case_ , snake_case_ )
__snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__snake_case = torch.exp(torch.linspace(math.log(snake_case_ ) , math.log(snake_case_ ) , snake_case_ ) )
__snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Dict ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCAmelCase ( self : Dict , snake_case_ : torch.FloatTensor , snake_case_ : int , snake_case_ : torch.FloatTensor , snake_case_ : Optional[torch.Generator] = None , snake_case_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
__snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__snake_case = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__snake_case = timesteps.to(self.discrete_sigmas.device )
__snake_case = self.discrete_sigmas[timesteps].to(sample.device )
__snake_case = self.get_adjacent_sigma(snake_case_ , snake_case_ ).to(sample.device )
__snake_case = torch.zeros_like(snake_case_ )
__snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__snake_case = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__snake_case = diffusion.unsqueeze(-1 )
__snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case_ , device=sample.device , dtype=sample.dtype )
__snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case_ , prev_sample_mean=snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : Optional[torch.Generator] = None , snake_case_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
__snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__snake_case = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__snake_case = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__snake_case = step_size.unsqueeze(-1 )
__snake_case = sample + step_size * model_output
__snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def lowerCAmelCase ( self : Dict , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case = timesteps.to(original_samples.device )
__snake_case = self.discrete_sigmas.to(original_samples.device )[timesteps]
__snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case_ ) * sigmas[:, None, None, None]
)
__snake_case = noise + original_samples
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
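# Hedged usage sketch, using the un-obfuscated diffusers names (ScoreSdeVeScheduler):
# sampling alternates the Langevin corrector and the reverse-SDE predictor, roughly:
#   scheduler.set_timesteps(num_inference_steps); scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.config.sigma_max
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, sigma_t), sample).prev_sample
#       out = scheduler.step_pred(model(sample, sigma_t), t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean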
| 614 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : Optional[Any] = torch.nn.Linear(2 ,4 )
A_ : Optional[Any] = torch.optim.AdamW(model.parameters() ,lr=1.0 )
A_ : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_lowerCAmelCase ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
A_ : Optional[int] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
A_ : List[Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
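# The helper above reduces a model to a scalar fingerprint (sum of absolute weights
# and biases); the tests below compare fingerprints to detect whether save_state /
# load_state actually restored the weights.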
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Tuple = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_lowerCAmelCase )
class _UpperCAmelCase ( _lowerCamelCase ):
@require_cuda
def _lowerCamelCase ( self ):
A_ : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a__ ):
A_ : Tuple = Accelerator(cpu=a__ )
def _lowerCamelCase ( self ):
A_ : Optional[int] = Accelerator()
A_ : Optional[Any] = GradientState()
assert state.num_steps == 1
A_ : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
A_ : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCamelCase ( self ):
A_ : List[str] = Accelerator()
A_ , A_ , A_ , A_ , A_ : str = create_components()
        A_ , A_ , A_ , A_ , A_ : int = accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _lowerCamelCase ( self ):
A_ : int = Accelerator()
A_ , A_ , A_ , A_ , A_ : Optional[Any] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _lowerCamelCase ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a__ , **a__ ):
pass
with patch("""torch.cuda.set_device""" , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
A_ : Tuple = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _lowerCamelCase ( self ):
A_ : Dict = Accelerator()
A_ , A_ , A_ , A_ , A_ : str = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
A_ : List[Any] = get_signature(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
def _lowerCamelCase ( self ):
A_ : Dict = Accelerator()
A_ , A_ , A_ , A_ , A_ : Tuple = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
A_ : List[Any] = get_signature(a__ )
# saving hook
def save_config(a__ , a__ , a__ ):
A_ : Union[str, Any] = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(a__ , """data.json""" ) , """w""" ) as f:
json.dump(a__ , a__ )
# loading hook
def load_config(a__ , a__ ):
with open(os.path.join(a__ , """data.json""" ) , """r""" ) as f:
A_ : List[str] = json.load(a__ )
A_ : Any = config["""class_name"""]
A_ : Dict = accelerator.register_save_state_pre_hook(a__ )
A_ : Optional[Any] = accelerator.register_load_state_pre_hook(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
A_ : Tuple = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks removed
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
A_ : str = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _lowerCamelCase ( self ):
A_ : List[Any] = Accelerator()
A_ , A_ , A_ , A_ , A_ : Any = create_components()
A_ : Any = None
# This should work
A_ , A_ , A_ , A_ , A_ , A_ : str = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertTrue(dummy_obj is None )
def _lowerCamelCase ( self ):
A_ : Dict = Accelerator()
A_ , A_ , A_ , A_ , A_ : List[Any] = create_components()
A_ : Dict = [1, 2, 3]
# This should work
A_ , A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _lowerCamelCase ( self ):
from transformers import AutoModelForCausalLM
A_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map={"""""": 0} , )
A_ : List[str] = Accelerator()
# This should work
A_ : List[Any] = accelerator.prepare(a__ )
@slow
@require_bnb
def _lowerCamelCase ( self ):
from transformers import AutoModelForCausalLM
A_ : Tuple = Accelerator()
with init_empty_weights():
A_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
A_ : Tuple = infer_auto_device_map(a__ )
A_ : Dict = """cpu"""
A_ : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ )
# This should not work and get value error
with self.assertRaises(a__ ):
A_ : Union[str, Any] = accelerator.prepare(a__ )
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ):
from transformers import AutoModelForCausalLM
A_ : List[Any] = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
A_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
A_ : Dict = infer_auto_device_map(a__ )
A_ : Optional[Any] = 1
A_ : List[Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map=a__ , )
A_ : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a__ ):
A_ : Optional[Any] = accelerator.prepare(a__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
A_ : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
A_ : Any = infer_auto_device_map(a__ )
A_ : int = 1
A_ : Tuple = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map=a__ , )
A_ : List[Any] = Accelerator()
# This should work
A_ : Union[str, Any] = accelerator.prepare(a__ )
@require_cuda
def _lowerCamelCase ( self ):
A_ : Tuple = torch.nn.Linear(10 , 10 )
A_ : str = torch.optim.SGD(model.parameters() , lr=0.01 )
A_ : Dict = Accelerator(cpu=a__ )
A_ : Tuple = accelerator.prepare(a__ )
| 569 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCAmelCase = """true"""
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
set_seed(4_2 )
A_ : List[Any] = RegressionModel()
A_ : int = deepcopy(_lowerCAmelCase )
A_ : Union[str, Any] = RegressionDataset(length=_lowerCAmelCase )
A_ : Union[str, Any] = DataLoader(_lowerCAmelCase ,batch_size=_lowerCAmelCase )
model.to(accelerator.device )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return model, ddp_model, dataloader
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=False ):
'''simple docstring'''
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
A_ : Tuple = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
def tokenize_function(_lowerCAmelCase ):
A_ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
A_ : List[Any] = dataset.map(
_lowerCAmelCase ,batched=_lowerCAmelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
A_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase ,padding="""longest""" ,return_tensors="""pt""" )
return tokenizer.pad(_lowerCAmelCase ,padding="""max_length""" ,max_length=1_2_8 ,return_tensors="""pt""" )
return DataLoader(_lowerCAmelCase ,shuffle=_lowerCAmelCase ,collate_fn=_lowerCAmelCase ,batch_size=1_6 )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : int = Accelerator(dispatch_batches=_lowerCAmelCase ,split_batches=_lowerCAmelCase )
A_ : List[Any] = get_dataloader(_lowerCAmelCase ,not dispatch_batches )
A_ : Any = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=_lowerCAmelCase )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : Optional[Any] = []
for batch in dataloader:
A_ , A_ : List[str] = batch.values()
with torch.no_grad():
A_ : Tuple = model(_lowerCAmelCase )
A_ , A_ : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ : int = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCAmelCase )
targs.append(_lowerCAmelCase )
A_ , A_ : Optional[int] = torch.cat(_lowerCAmelCase ), torch.cat(_lowerCAmelCase )
return logits, targs
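# `gather_for_metrics` strips the duplicate samples a sharded dataloader appends to
# even out the last batch, so the gathered tensors line up 1:1 with the dataset.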
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
A_ , A_ , A_ : Union[str, Any] = get_basic_setup(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
A_ , A_ : Union[str, Any] = generate_predictions(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
assert (
len(_lowerCAmelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCAmelCase )}"""
def _lowerCAmelCase ( _lowerCAmelCase = False ,_lowerCAmelCase = False ):
'''simple docstring'''
A_ : Any = evaluate.load("""glue""" ,"""mrpc""" )
A_ , A_ : Optional[Any] = get_mrpc_setup(_lowerCAmelCase ,_lowerCAmelCase )
# First do baseline
A_ , A_ , A_ : Any = setup["""no"""]
model.to(_lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCAmelCase )
with torch.inference_mode():
A_ : List[str] = model(**_lowerCAmelCase )
A_ : int = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCAmelCase ,references=batch["""labels"""] )
A_ : List[str] = metric.compute()
# Then do distributed
A_ , A_ , A_ : int = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ : Optional[Any] = model(**_lowerCAmelCase )
A_ : Any = outputs.logits.argmax(dim=-1 )
A_ : int = batch["""labels"""]
A_ , A_ : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )
A_ : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : str = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_lowerCAmelCase ,_lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ : Optional[int] = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_lowerCAmelCase ,9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
A_ : Union[str, Any] = Accelerator()
test_torch_metrics(_lowerCAmelCase ,5_1_2 )
accelerator.state._reset_state()
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 569 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = 13
UpperCAmelCase__ : Union[str, Any] = 7
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = 99
UpperCAmelCase__ : Any = 3_84
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 37
UpperCAmelCase__ : Union[str, Any] = "gelu"
UpperCAmelCase__ : Dict = 0.1
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Optional[int] = 5_12
UpperCAmelCase__ : List[str] = 16
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : List[str] = 0.02
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : str = 1_28
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : List[str] = 9
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : Any = None
def lowerCAmelCase__ ( self )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : str = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertModel(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : str = model(__UpperCamelCase )
UpperCAmelCase__ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertForMaskedLM(config=__UpperCamelCase )
UpperCAmelCase__ : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : Tuple = TFConvBertForSequenceClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_choices
UpperCAmelCase__ : str = TFConvBertForMultipleChoice(config=__UpperCamelCase )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Dict = TFConvBertForTokenClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : int = TFConvBertForQuestionAnswering(config=__UpperCamelCase )
UpperCAmelCase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
        UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFConvBertModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = True
if hasattr(__UpperCamelCase , "use_cache" ):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : str = len(model(__UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase , saved_model=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = os.path.join(__UpperCamelCase , "saved_model" , "1" )
UpperCAmelCase__ : int = tf.keras.models.load_model(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : Optional[int] = outputs["encoder_hidden_states"]
UpperCAmelCase__ : Tuple = outputs["encoder_attentions"]
else:
UpperCAmelCase__ : List[Any] = outputs["hidden_states"]
UpperCAmelCase__ : Union[str, Any] = outputs["attentions"]
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Union[str, Any] = getattr(self.model_tester , "key_length" , __UpperCamelCase )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
def check_decoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : str = len(__UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[int] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = model_class(__UpperCamelCase )
UpperCAmelCase__ : Any = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : int = model(__UpperCamelCase )[0]
UpperCAmelCase__ : Any = [1, 6, 7_68]
self.assertEqual(output.shape , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
| 711 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
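# --- Added illustrative sketch (not part of the original file) --------------
# The class above keeps a learnable per-dimension mean and std and exposes a
# whiten / un-whiten pair (scale / unscale), matching the shape of diffusers'
# StableUnCLIPImageNormalizer. A self-contained round-trip under that reading
# of the two method bodies:
_mean = torch.randn(1, 7_68)                # hypothetical stand-in values
_std = torch.full((1, 7_68), 2.0)
_embeds = torch.randn(4, 7_68)
_scaled = (_embeds - _mean) * 1.0 / _std    # mirrors the scale body above
_restored = (_scaled * _std) + _mean        # mirrors the unscale body above
assert torch.allclose(_restored, _embeds, atol=1e-5)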
| 660 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ) ->Any:
    '''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
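# --- Added worked example (not part of the original file) -------------------
# The acceptance rule above is the Metropolis criterion: a worsening move with
# score change `change` (< 0) at temperature T is taken with probability
# e**(change / T). For instance, a move that is worse by 10 at T = 100 is
# accepted with probability e**(-10 / 100) ~= 0.905, so hot early iterations
# explore freely, while at T = 1 the same move survives only with
# e**(-10) ~= 4.5e-5 and the walk becomes effectively greedy.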
if __name__ == "__main__":
    def test_fa( x , y ) ->Any:
        '''simple docstring'''
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
    def test_fa( x , y ) ->str:
        '''simple docstring'''
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F"{local_min.score()}"
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F"{local_min.score()}"
)
| 547 |
from __future__ import annotations
def kmp( pattern , text ) ->bool:
    '''simple docstring'''
    # 1) Construct the failure array
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern ) ->list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
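# --- Added worked example (not part of the original file) -------------------
# For pattern "ABABX" the failure array is [0, 0, 1, 2, 0]: at index 3 the
# prefix "ABAB" has "AB" (length 2) as its longest proper prefix that is also
# a suffix, while "X" at index 4 matches no prefix and resets to 0. This is
# what lets Test 2 below skip ahead instead of rescanning the text.
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]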
if __name__ == "__main__":
# Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
    pattern = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 547 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
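# --- Added illustrative check (not part of the original file) ---------------
# The helper above maps a pixel size to the corresponding latent size,
# i.e. ceil(size / scale_factor**2) * scale_factor with the default
# scale_factor of 8: 768 is a multiple of 64, giving 12 * 8 = 96, while 700
# rounds up to 11 * 8 = 88.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(700, 700) == (88, 88)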
class UpperCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any],__A : UNetaDConditionModel,__A : DDPMScheduler,__A : VQModel,):
super().__init__()
self.register_modules(
unet=__lowerCAmelCase,scheduler=__lowerCAmelCase,movq=__lowerCAmelCase,)
_lowerCamelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : Optional[Any],__A : List[str],__A : Optional[Any],__A : Tuple,__A : Dict ):
if latents is None:
_lowerCamelCase : Union[str, Any] = randn_tensor(__lowerCAmelCase,generator=__lowerCAmelCase,device=__lowerCAmelCase,dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : List[Any] = latents.to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : Any,__A : Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : Dict = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase,__lowerCAmelCase )
def lowerCamelCase_ ( self : Dict,__A : Any=0 ):
if is_accelerate_available() and is_accelerate_version(">=","0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu",silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase , _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase,__lowerCAmelCase,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase_ ( self : List[Any] ):
if not hasattr(self.unet,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : List[str],__A : Union[torch.FloatTensor, List[torch.FloatTensor]],__A : Union[torch.FloatTensor, List[torch.FloatTensor]],__A : int = 5_1_2,__A : int = 5_1_2,__A : int = 1_0_0,__A : float = 4.0,__A : int = 1,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : Optional[str] = "pil",__A : bool = True,):
_lowerCamelCase : Dict = self._execution_device
_lowerCamelCase : List[Any] = guidance_scale > 1.0
if isinstance(__lowerCAmelCase,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = torch.cat(__lowerCAmelCase,dim=0 )
_lowerCamelCase : int = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__lowerCAmelCase,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = torch.cat(__lowerCAmelCase,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase,dim=0 )
_lowerCamelCase : Any = negative_image_embeds.repeat_interleave(__lowerCAmelCase,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds],dim=0 ).to(dtype=self.unet.dtype,device=__lowerCAmelCase )
self.scheduler.set_timesteps(__lowerCAmelCase,device=__lowerCAmelCase )
_lowerCamelCase : List[str] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.unet.config.in_channels
_lowerCamelCase , _lowerCamelCase : List[str] = downscale_height_and_width(__lowerCAmelCase,__lowerCAmelCase,self.movq_scale_factor )
# create initial latent
_lowerCamelCase : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width),image_embeds.dtype,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,self.scheduler,)
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Optional[int] = {"image_embeds": image_embeds}
_lowerCamelCase : int = self.unet(
sample=__lowerCAmelCase,timestep=__lowerCAmelCase,encoder_hidden_states=__lowerCAmelCase,added_cond_kwargs=__lowerCAmelCase,return_dict=__lowerCAmelCase,)[0]
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1],dim=1 )
_lowerCamelCase , _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase , _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text],dim=1 )
if not (
hasattr(self.scheduler.config,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1],dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,generator=__lowerCAmelCase,)[0]
# post-processing
_lowerCamelCase : Any = self.movq.decode(__lowerCAmelCase,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_lowerCamelCase : int = image * 0.5 + 0.5
_lowerCamelCase : List[str] = image.clamp(0,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0,2,3,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : Optional[Any] = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=__lowerCAmelCase )
| 709 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array : list[int] , index_a : int , index_b : int , direction : int ):
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a] , array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
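# --- Added illustrative check (not part of the original file) ---------------
# Bitonic sort requires a power-of-two input length: bitonic_sort builds an
# ascending and a descending half, then bitonic_merge fuses them.
example = [3, 1, 4, 2]
bitonic_sort(example, 0, len(example), 1)
assert example == [1, 2, 3, 4]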
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :Tuple = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Union[str, Any] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 251 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Union[str, Any] = ['audio_values', 'audio_mask']
def __init__( self : Dict , a : Optional[Any]=2_048 , a : Union[str, Any]=1 , a : str=[16, 16] , a : Optional[int]=128 , a : str=44_100 , a : List[str]=86 , a : int=2_048 , a : Tuple=0.0 , **a : int , )-> Any:
"""simple docstring"""
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , **a , )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : np.array )-> np.ndarray:
"""simple docstring"""
lowercase__ = spectrogram(
a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : List[Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : Union[str, Any] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowercase__ = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(a ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(a ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowercase__ = {'audio_values': padded_audio_features}
lowercase__ = BatchFeature(data=a , tensor_type=a )
return encoded_inputs
| 235 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Optional[int]=3 , __magic_name__ : str=18 , __magic_name__ : Dict=30 , __magic_name__ : List[str]=400 , __magic_name__ : Tuple=True , __magic_name__ : List[Any]=None , __magic_name__ : Any=True , ) -> List[str]:
lowerCamelCase_ : Optional[Any] = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase_ : Any = parent
lowerCamelCase_ : Any = batch_size
lowerCamelCase_ : List[str] = num_channels
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : List[Any] = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Optional[Any] = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : str = do_normalize
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = ImageGPTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase_ : Dict = ImageGPTImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "clusters" ) )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_normalize" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase_ : Dict = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__magic_name__ , obj[key] ) )
else:
self.assertEqual(obj[key] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : int = os.path.join(__magic_name__ , "image_processor.json" )
image_processor_first.to_json_file(__magic_name__ )
lowerCamelCase_ : Optional[Any] = self.image_processing_class.from_json_file(__magic_name__ ).to_dict()
lowerCamelCase_ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__magic_name__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__magic_name__ )
lowerCamelCase_ : Optional[int] = self.image_processing_class.from_pretrained(__magic_name__ ).to_dict()
lowerCamelCase_ : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__magic_name__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __magic_name__ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
pass
def __a ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
lowerCamelCase_ : int = Image.open(dataset[4]["file"] )
lowerCamelCase_ : List[str] = Image.open(dataset[5]["file"] )
lowerCamelCase_ : List[Any] = [imagea, imagea]
return images
@require_vision
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
lowerCamelCase_ : Optional[int] = prepare_images()
# test non-batched
lowerCamelCase_ : str = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowerCamelCase_ : Dict = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __magic_name__ )
# test batched
lowerCamelCase_ : Dict = image_processing(__magic_name__ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowerCamelCase_ : List[Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __magic_name__ )
| 705 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs( graph ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
if color[i] == color[j]:
return False
return True
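# --- Added illustrative counterexample (not part of the original file) ------
# A triangle contains an odd cycle, so no 2-coloring exists and the check
# reports False for it.
assert not check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})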
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 253 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__UpperCAmelCase =object()
# For specifying empty leaf dict `{}`
__UpperCAmelCase =object()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
__lowerCamelCase = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
__lowerCamelCase = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[int]:
def replace(UpperCamelCase__ , UpperCamelCase__ ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def __lowerCAmelCase ( ) -> Dict:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowerCAmelCase ( UpperCamelCase__ ) -> int:
__lowerCamelCase = _get_partition_rules()
__lowerCamelCase = _replacement_rules(UpperCamelCase__ )
__lowerCamelCase = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
__lowerCamelCase = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
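# --- Added illustrative note (not part of the original file) ----------------
# Assuming the function above is the usual `set_partitions` helper from the
# Flax model-parallel examples: it flattens a parameter tree, matches each
# key path against the regex rules, and returns a frozen tree of
# PartitionSpecs -- e.g. the wte embedding is sharded over the "mp" axis
# (its rule's obfuscated second argument is presumably None), while layer-norm
# scales map to None and are replicated on every device.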
| 546 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "open-llama"
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=10_00_00 , __lowerCamelCase : List[str]=40_96 , __lowerCamelCase : Any=1_10_08 , __lowerCamelCase : str=32 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Dict="silu" , __lowerCamelCase : List[Any]=20_48 , __lowerCamelCase : str=0.02 , __lowerCamelCase : List[str]=1e-6 , __lowerCamelCase : Dict=True , __lowerCamelCase : str=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Optional[Any] , ) -> Optional[Any]:
A : Optional[int] = vocab_size
A : Optional[Any] = max_position_embeddings
A : Tuple = hidden_size
A : List[Any] = intermediate_size
A : Tuple = num_hidden_layers
A : Any = num_attention_heads
A : Optional[int] = hidden_act
A : List[str] = initializer_range
A : int = rms_norm_eps
A : List[str] = use_cache
A : Any = kwargs.pop(
"use_memorry_efficient_attention" , __lowerCamelCase )
A : str = hidden_dropout_prob
A : Any = attention_dropout_prob
A : Tuple = use_stable_embedding
A : Optional[int] = shared_input_output_embedding
A : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
A : Any = self.rope_scaling.get("type" , __lowerCamelCase )
A : Union[str, Any] = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" ) | 17 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
A : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A : Any = tensor_value
A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 17 | 1 |
from __future__ import annotations
from collections import deque
class __lowercase :
def __init__( self , lowercase_) -> str:
__snake_case = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
for keyword in keywords:
self.add_keyword(lowercase_)
self.set_fail_transitions()
def _a ( self , lowercase_ , lowercase_) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _a ( self , lowercase_) -> None:
__snake_case = 0
for character in keyword:
__snake_case = self.find_next_state(lowercase_ , lowercase_)
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
__snake_case = len(self.adlist) - 1
else:
__snake_case = next_state
self.adlist[current_state]["output"].append(lowercase_)
def _a ( self) -> None:
__snake_case = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowercase_)
__snake_case = 0
while q:
__snake_case = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowercase_)
__snake_case = self.adlist[r]['fail_state']
while (
self.find_next_state(lowercase_ , self.adlist[child]['value']) is None
and state != 0
):
__snake_case = self.adlist[state]['fail_state']
__snake_case = self.find_next_state(
lowercase_ , self.adlist[child]['value'])
if self.adlist[child]["fail_state"] is None:
__snake_case = 0
__snake_case = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def _a ( self , lowercase_) -> dict[str, list[int]]:
__snake_case = {} # returns a dict with keywords and list of its occurrences
__snake_case = 0
for i in range(len(lowercase_)):
while (
self.find_next_state(lowercase_ , string[i]) is None
and current_state != 0
):
__snake_case = self.adlist[current_state]['fail_state']
__snake_case = self.find_next_state(lowercase_ , string[i])
if next_state is None:
__snake_case = 0
else:
__snake_case = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__snake_case = []
result[key].append(i - len(lowercase_) + 1)
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
lowercase : torch.FloatTensor
class A_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 3 , __UpperCAmelCase = 32 , __UpperCAmelCase = 2_56 , __UpperCAmelCase = 32 , __UpperCAmelCase = None , __UpperCAmelCase = 0.1_8215 , __UpperCAmelCase = "group" , ) -> Any:
super().__init__()
# pass init params to Encoder
a : Optional[int] = Encoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
a : Union[str, Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
a : Tuple = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
a : Optional[Any] = VectorQuantizer(__UpperCAmelCase , __UpperCAmelCase , beta=0.25 , remap=__UpperCAmelCase , sane_index_shape=__UpperCAmelCase )
a : int = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
# pass init params to Decoder
a : Any = Decoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , norm_type=__UpperCAmelCase , )
@apply_forward_hook
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = True ) -> VQEncoderOutput:
a : Dict = self.encoder(__UpperCAmelCase )
a : int = self.quant_conv(__UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__UpperCAmelCase )
@apply_forward_hook
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
a , a , a : List[Any] = self.quantize(__UpperCAmelCase )
else:
a : List[str] = h
a : str = self.post_quant_conv(__UpperCAmelCase )
a : Any = self.decoder(__UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
a : Any = sample
a : Any = self.encode(__UpperCAmelCase ).latents
a : List[Any] = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
| 509 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
lowercase : str = ["image_processor", "tokenizer"]
lowercase : Union[str, Any] = "FlavaImageProcessor"
lowercase : Dict = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Tuple:
a : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
a : Any = kwargs.pop('feature_extractor' )
a : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
a : Optional[Any] = self.image_processor
def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
a : Tuple = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
if images is not None:
a : Tuple = self.image_processor(
__UpperCAmelCase , return_image_mask=__UpperCAmelCase , return_codebook_pixels=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(__UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase_ ( self ) -> str:
a : str = self.tokenizer.model_input_names
a : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase_ ( self ) -> List[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def lowercase_ ( self ) -> Any:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 509 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
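# --- Added illustrative check (not part of the original file) ---------------
# Right padding keeps each sequence left-aligned and fills the tail with the
# padding value:
assert padding_tensor([[1, 2], [3]], -100, "right", 4) == [
    [1, 2, -100, -100],
    [3, -100, -100, -100],
]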
def is_punctuation(char):
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation, matching the ASCII
    # punctuation blocks: ! to /, : to @, [ to `, and { to ~.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here because the labels are not yet all the same length
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
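# Sketch of how the collator is typically wired up (assumes a LUKE tokenizer
# and a token-classification dataset whose examples carry "labels", "ner_tags"
# and "original_entity_spans" fields):
#
#     from torch.utils.data import DataLoader
#     data_collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#     train_loader = DataLoader(train_dataset, batch_size=8, collate_fn=data_collator)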
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" ) | 46 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
UpperCamelCase_ = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base**exponent % modulo_value by exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
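# Worked example: _modexpt(3, 5, 100) -> 43, since 3**5 = 243 and 243 % 100 = 43.
# Halving even exponents makes this O(log(exponent)) multiplications instead of
# O(exponent), which is what keeps the tower computation below tractable.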
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
'''simple docstring'''
UpperCAmelCase : Dict = tempfile.mkdtemp()
return os.path.join(__magic_name__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # constructed from a PIL image, so the serialized path is a new temp file
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase,  # the long literal above keeps its original binding name
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer, honouring subtractive pairs."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # a smaller symbol before a larger one is subtractive (e.g. IV = 4)
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
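# Example: parse_roman_numerals("XIV") -> 14
# (X contributes 10; then I precedes the larger V and is subtracted, so the
# trailing IV contributes 5 - 1 = 4, giving 10 + 4 = 14.)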
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form Roman numeral for a positive integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
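# Example: generate_roman_numerals(1994) -> "MCMXCIV"
# (1000 -> "M", 900 -> "CM", 90 -> "XC", 4 -> "IV"), the canonical minimal form.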
def SCREAMING_SNAKE_CASE_ ( snake_case__ = "/p089_roman.txt" ) -> int:
lowerCAmelCase = 0
with open(os.path.dirname(snake_case__ ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(snake_case__ )
lowerCAmelCase = generate_roman_numerals(snake_case__ )
savings += len(snake_case__ ) - len(snake_case__ )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
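# Example: with a = [3, 1, 4, 1, 5] and pivot a[0] == 3, partition(a, 0, 5)
# rearranges a so values < 3 sit left of the returned index and values >= 3
# sit right of it: a becomes [1, 1, 3, 4, 5] and the function returns 2.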
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__a : Tuple = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a,  # the long literal above keeps its original binding name
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCAmelCase_ ( self )-> Union[str, Any]:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , "encoder_seq_length" ):
UpperCamelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
UpperCamelCase_ = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCamelCase_ = outputs.decoder_hidden_states
self.assertIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
UpperCamelCase_ = getattr(self.model_tester , "seq_length" , _lowercase )
UpperCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase=False )-> int:
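# The "WithTeacher" classification head is inference-only (it computes no loss),
# so the shared tests must not pass labels to it.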
UpperCamelCase_ = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> int:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
UpperCamelCase_ = getattr(self.model_tester , "seq_length" , _lowercase )
UpperCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , _lowercase )
UpperCamelCase_ = getattr(self.model_tester , "key_length" , _lowercase )
UpperCamelCase_ = getattr(self.model_tester , "chunk_length" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
UpperCamelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCAmelCase_ ( self )-> Optional[int]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase_ = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase_ = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def lowerCAmelCase( )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self )-> int:
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="tf" )
# forward pass
UpperCamelCase_ = model(**_lowercase , training=_lowercase )
# verify the logits
UpperCamelCase_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
UpperCamelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="tf" )
# forward pass
UpperCamelCase_ = model(**_lowercase , training=_lowercase )
# verify the logits
UpperCamelCase_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
UpperCamelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 628 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type )-> None:
"""simple docstring"""
for attribute in key.split("." ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCamelCase_ = value
elif weight_type == "weight_g":
UpperCamelCase_ = value
elif weight_type == "weight_v":
UpperCamelCase_ = value
elif weight_type == "bias":
UpperCamelCase_ = value
elif weight_type == "running_mean":
UpperCamelCase_ = value
elif weight_type == "running_var":
UpperCamelCase_ = value
elif weight_type == "num_batches_tracked":
UpperCamelCase_ = value
elif weight_type == "weight_ih_l0":
UpperCamelCase_ = value
elif weight_type == "weight_hh_l0":
UpperCamelCase_ = value
elif weight_type == "bias_ih_l0":
UpperCamelCase_ = value
elif weight_type == "bias_hh_l0":
UpperCamelCase_ = value
elif weight_type == "weight_ih_l1":
UpperCamelCase_ = value
elif weight_type == "weight_hh_l1":
UpperCamelCase_ = value
elif weight_type == "bias_ih_l1":
UpperCamelCase_ = value
elif weight_type == "bias_hh_l1":
UpperCamelCase_ = value
else:
UpperCamelCase_ = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore( name , ignore_keys )-> bool:
"""simple docstring"""
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCamelCase_ , UpperCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights( orig_dict , hf_model , model_name )-> None:
"""simple docstring"""
unused_weights = []
if model_name == "encodec_24khz" or "encodec_32khz":
MAPPING = MAPPING_24K
elif model_name == "encodec_48khz":
MAPPING = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(name , IGNORE_KEYS ):
logger.info(f"{name} was ignored" )
continue
is_used = False
for key, mapped_key in MAPPING.items():
if "*" in key:
UpperCamelCase_ , UpperCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
is_used = True
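# Resolve the "*" in the mapped key to the concrete layer index parsed from the
# original parameter name.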
if "*" in mapped_key:
layer_index = name.split(key )[0].split("." )[-2]
mapped_key = mapped_key.replace("*" , layer_index )
if "weight_g" in name:
UpperCamelCase_ = "weight_g"
elif "weight_v" in name:
UpperCamelCase_ = "weight_v"
elif "weight_ih_l0" in name:
UpperCamelCase_ = "weight_ih_l0"
elif "weight_hh_l0" in name:
UpperCamelCase_ = "weight_hh_l0"
elif "bias_ih_l0" in name:
UpperCamelCase_ = "bias_ih_l0"
elif "bias_hh_l0" in name:
UpperCamelCase_ = "bias_hh_l0"
elif "weight_ih_l1" in name:
UpperCamelCase_ = "weight_ih_l1"
elif "weight_hh_l1" in name:
UpperCamelCase_ = "weight_hh_l1"
elif "bias_ih_l1" in name:
UpperCamelCase_ = "bias_ih_l1"
elif "bias_hh_l1" in name:
UpperCamelCase_ = "bias_hh_l1"
elif "bias" in name:
UpperCamelCase_ = "bias"
elif "weight" in name:
UpperCamelCase_ = "weight"
elif "running_mean" in name:
UpperCamelCase_ = "running_mean"
elif "running_var" in name:
UpperCamelCase_ = "running_var"
elif "num_batches_tracked" in name:
UpperCamelCase_ = "num_batches_tracked"
else:
UpperCamelCase_ = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , )-> None:
"""simple docstring"""
if config_path is not None:
config = EncodecConfig.from_pretrained(config_path )
else:
config = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
config.upsampling_ratios = [8, 5, 4, 4]
config.target_bandwidths = [2.2]
config.num_filters = 64
config.sampling_rate = 32_000
config.codebook_size = 2_048
config.use_causal_conv = False
config.normalize = False
config.use_conv_shortcut = False
elif model_name == "encodec_48khz":
config.upsampling_ratios = [8, 5, 4, 2]
config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
config.sampling_rate = 48_000
config.audio_channels = 2
config.use_causal_conv = False
config.norm_type = "time_group_norm"
config.normalize = True
config.chunk_length_s = 1.0
config.overlap = 0.01
else:
raise ValueError(f"Unknown model name: {model_name}" )
model = EncodecModel(config )
feature_extractor = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
original_checkpoint = torch.load(checkpoint_path )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCamelCase_ = original_checkpoint["best_state"]
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(repo_id )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 628 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case( __A , unittest.TestCase ):
_A = KandinskyVaaInpaintPipeline
_A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_A = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_A = False
@property
def A ( self ):
'''simple docstring'''
return 32
@property
def A ( self ):
'''simple docstring'''
return 32
@property
def A ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def A ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self ):
'''simple docstring'''
return 100
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**A_ )
return model
@property
def A ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=A_ , )
_SCREAMING_SNAKE_CASE = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self , A_ , A_=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(A_ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
_SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.float32 )
_SCREAMING_SNAKE_CASE = 0
if str(A_ ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(A_ )
_SCREAMING_SNAKE_CASE = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu'''
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**A_ )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(A_ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
_SCREAMING_SNAKE_CASE = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.float32 )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = '''a hat'''
_SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(A_ )
_SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16 )
_SCREAMING_SNAKE_CASE = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
| 707 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
return x + 2
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''x = 3'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
assert result == 3
self.assertDictEqual(A_ , {'''x''': 3} )
_SCREAMING_SNAKE_CASE = '''x = y'''
_SCREAMING_SNAKE_CASE = {'''y''': 5}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ , {'''x''': 5, '''y''': 5} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''y = add_two(x)'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''add_two''': add_two} , state=A_ )
assert result == 5
self.assertDictEqual(A_ , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
assert result is None
assert "tried to execute add_two" in out.out
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''x = 3'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
assert result == 3
self.assertDictEqual(A_ , {'''x''': 3} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''add_two''': add_two} , state=A_ )
self.assertDictEqual(A_ , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(A_ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''x = 3\ny = 5'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ , {'''x''': 3, '''y''': 5} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''text = f\'This is x: {x}.\''''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(A_ , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(A_ , {'''x''': 3, '''y''': 2} )
_SCREAMING_SNAKE_CASE = {'''x''': 8}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ , {'''x''': 8, '''y''': 5} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''test_list = [x, add_two(x)]'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''add_two''': add_two} , state=A_ )
self.assertListEqual(A_ , [3, 5] )
self.assertDictEqual(A_ , {'''x''': 3, '''test_list''': [3, 5]} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''y = x'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {} , state=A_ )
assert result == 3
self.assertDictEqual(A_ , {'''x''': 3, '''y''': 3} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''test_list = [x, add_two(x)]\ntest_list[1]'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''add_two''': add_two} , state=A_ )
assert result == 5
self.assertDictEqual(A_ , {'''x''': 3, '''test_list''': [3, 5]} )
_SCREAMING_SNAKE_CASE = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
_SCREAMING_SNAKE_CASE = {'''x''': 3}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''add_two''': add_two} , state=A_ )
assert result == 5
self.assertDictEqual(A_ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''x = 0\nfor i in range(3):\n x = i'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = evaluate(A_ , {'''range''': range} , state=A_ )
assert result == 2
self.assertDictEqual(A_ , {'''x''': 2, '''i''': 2} )
| 168 | 0 |
'''simple docstring'''
def lowerCamelCase( number ) -> bool:
if number < 0:
raise ValueError('number must not be negative' )
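# A positive power of two has exactly one set bit, so clearing the lowest set bit
# with number & (number - 1) yields zero exactly for powers of two (and for zero).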
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' ,[
SplitDict(),
SplitDict({'train': SplitInfo(name='train' ,num_bytes=1337 ,num_examples=42 ,dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' ,num_bytes=1337 ,num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] ,)
def lowerCamelCase( split_dict ) -> List[str]:
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict_yaml_list ) == len(split_dict )
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' ,[SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def lowerCamelCase( split_info ) -> Optional[Any]:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 366 | 1 |
from collections import deque
def tarjan( g ):
n = len(g )
stack = deque()
on_stack = [False for _ in range(n )]
index_of = [-1 for _ in range(n )]
lowlink_of = index_of[:]
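# index_of[v] is the DFS discovery index of v (-1 while unvisited); lowlink_of[v]
# is the smallest discovery index reachable from v. A node stays on `stack` until
# the root of its strongly connected component is closed.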
def strong_connect(v , index , components ):
index_of[v] = index # the number when this node is seen
lowlink_of[v] = index # lowest rank node reachable from here
index += 1
stack.append(v )
on_stack[v] = True
for w in g[v]:
if index_of[w] == -1:
index = strong_connect(w , index , components )
lowlink_of[v] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowlink_of[v] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
component = []
w = stack.pop()
on_stack[w] = False
component.append(w )
while w != v:
w = stack.pop()
on_stack[w] = False
component.append(w )
components.append(component )
return index
components = []
for v in range(n ):
if index_of[v] == -1:
strong_connect(v , 0 , components )
return components
def create_graph( n_vertices , edges ):
g = [[] for _ in range(n_vertices )]
for u, v in edges:
g[u].append(v )
return g
if __name__ == "__main__":
# Test
n_vertices = 7
source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
edges = [(u, v) for u, v in zip(source, target)]
g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 707 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
data: float
left: TreeNode | None = None
right: TreeNode | None = None
def SCREAMING_SNAKE_CASE__ ( __a ):
# Validation
def is_valid_tree(node ) -> bool:
if node is None:
return True
if not isinstance(node , TreeNode ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
node , left_bound , right_bound ) -> bool:
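# Tighten (left_bound, right_bound) while descending: every node must lie strictly
# inside the interval inherited from all of its ancestors.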
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , right_bound )
)
return is_binary_search_tree_recursive_check(__a , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 534 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 | 1 |
'''simple docstring'''
def binary_multiply( a , b ) -> int:
res = 0
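# Russian-peasant multiplication: scan the bits of b, adding the progressively
# doubled a whenever the current bit is set.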
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def binary_mod_multiply( a , b , c ) -> int:
res = 0
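# Same doubling scheme, but every accumulation is reduced modulo c so intermediate
# values stay bounded.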
while b > 0:
if b & 1:
res = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree ) -> List[Tuple[int, ...]]:
shapes = []
if isinstance(tree , dict):
for v in tree.values():
shapes.extend(_fetch_dims(v))
elif isinstance(tree , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(t))
elif isinstance(tree , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx , dims ) -> Tuple[int, ...]:
idx = []
for d in reversed(dims ):
idx.append(flat_idx % d )
flat_idx = flat_idx // d
return tuple(reversed(idx ))
@torch.jit.ignore
def _get_minimal_slice_set( start , end , dims , start_edges = None , end_edges = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(l ) -> None:
tally = True
for i in range(len(l )):
reversed_idx = -1 * (i + 1)
l[reversed_idx] &= tally
tally = l[reversed_idx]
if start_edges is None:
start_edges = [s == 0 for s in start]
reduce_edge_list(start_edges)
if end_edges is None:
end_edges = [e == (d - 1) for e, d in zip(end , dims)]
reduce_edge_list(end_edges)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(start) == 0:
return [()]
elif len(start) == 1:
return [(slice(start[0] , end[0] + 1),)]
slices : List[Tuple[slice, ...]] = []
path_list : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(start , end):
if s == e:
path_list.append(slice(s , s + 1))
else:
break
path : Tuple[slice, ...] = tuple(path_list)
divergence_idx = len(path_list)
# start == end, and we're done
if divergence_idx == len(start):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
sdi = start[divergence_idx]
return tuple(
path + (slice(sdi , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
edi = end[divergence_idx]
return tuple(
path + (slice(edi , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def _chunk_slice( t , flat_start , flat_end , no_batch_dims ) -> torch.Tensor:
batch_dims = t.shape[:no_batch_dims]
start_idx = list(_flat_idx_to_idx(flat_start , batch_dims))
# _get_minimal_slice_set is inclusive
end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims))
# Get an ordered list of slices to perform
slices = _get_minimal_slice_set(
start_idx , end_idx , batch_dims , )
sliced_tensors = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
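# chunk_layer: flatten all leading batch dimensions of `inputs`, run `layer` on
# row-chunks of at most `chunk_size`, and write each chunk's output into a
# pre-allocated result tree before restoring the original batch shape.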
def chunk_layer( layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , )-> Any:
if not (len(inputs) > 0):
raise ValueError('Must provide at least one input')
initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
def _prep_inputs(t) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
t = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
prepped_inputs : Dict[str, Any] = tensor_tree_map(_prep_inputs , inputs)
prepped_outputs = None
if _out is not None:
prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
flat_batch_dim = 1
for d in orig_batch_dims:
flat_batch_dim *= d
no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
i = 0
out = prepped_outputs
for _ in range(no_chunks):
# Chunk the input
if not low_mem:
select_chunk = _select_chunk
else:
select_chunk = partial(
_chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size) , no_batch_dims=len(orig_batch_dims) , )
chunks : Dict[str, Any] = tensor_tree_map(select_chunk , prepped_inputs)
# Run the layer on the chunk
output_chunk = layer(**chunks)
# Allocate space for the output
if out is None:
out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , output_chunk)
# Put the chunk in its pre-allocated space
if isinstance(output_chunk , dict):
def assign(d1 , d2) -> None:
for k, v in d1.items():
if isinstance(v , dict):
assign(v , d2[k])
else:
if _add_into_out:
v[i : i + chunk_size] += d2[k]
else:
v[i : i + chunk_size] = d2[k]
assign(out , output_chunk)
elif isinstance(output_chunk , tuple):
for x1, x2 in zip(out , output_chunk):
if _add_into_out:
x1[i : i + chunk_size] += x2
else:
x1[i : i + chunk_size] = x2
elif isinstance(output_chunk , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
out[i : i + chunk_size] = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , out)
return out
class __lowercase :
def __init__( self : List[str] , max_chunk_size : int = 512 , ):
self.max_chunk_size = max_chunk_size
self.cached_chunk_size : Optional[int] = None
self.cached_arg_data : Optional[tuple] = None
def _determine_favorable_chunk_size( self , fn : Callable , args : tuple , min_chunk_size : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
candidates : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
candidates = [c for c in candidates if c > min_chunk_size]
candidates = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(chunk_size : int) -> bool:
try:
with torch.no_grad():
fn(*args , chunk_size=chunk_size)
return True
except RuntimeError:
return False
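# Binary-search the candidates for the largest chunk size that still runs
# (i.e. does not raise, typically from running out of memory).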
min_viable_chunk_size_index = 0
i = len(candidates) - 1
while i > min_viable_chunk_size_index:
viable = test_chunk_size(candidates[i])
if not viable:
i = (min_viable_chunk_size_index + i) // 2
else:
min_viable_chunk_size_index = i
i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _compare_arg_caches( self , ac1 : Iterable , ac2 : Iterable):
consistent = True
for a1, a2 in zip(ac1 , ac2):
assert type(a1) == type(a2)
if isinstance(a1 , (list, tuple)):
consistent &= self._compare_arg_caches(a1 , a2)
elif isinstance(a1 , dict):
a1_items = [v for _, v in sorted(a1.items() , key=lambda x: x[0])]
a2_items = [v for _, v in sorted(a2.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(a1_items , a2_items)
else:
consistent &= a1 == a2
return consistent
def tune_chunk_size( self , representative_fn : Callable , args : tuple , min_chunk_size : int , ):
consistent = True
arg_data : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , args , object)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(arg_data)
consistent = self._compare_arg_caches(self.cached_arg_data , arg_data)
else:
# Otherwise, we can reuse the precomputed value
consistent = False
if not consistent:
self.cached_chunk_size = self._determine_favorable_chunk_size(
representative_fn , args , min_chunk_size , )
self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 0 |
from math import factorial
def solution( n : int = 20 ):
"""simple docstring"""
n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
k = n // 2
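# With n doubled above, the result is the central binomial coefficient
# C(2n, n) = (2n)! / (n! * n!), the middle entry of row 2n of Pascal's triangle.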
return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 61 | import argparse
from collections import defaultdict
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> List[Any]:
"""simple docstring"""
lowercase = f'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(UpperCAmelCase, '''r''' ) as f:
lowercase = f.readlines()
lowercase = f'class {class_name}('
lowercase = f'{4 * " "}def {test_name}('
lowercase = f'{8 * " "}{correct_line.split()[0]}'
lowercase = f'{16 * " "}{correct_line.split()[0]}'
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = 0
lowercase = 0
lowercase = []
for line in lines:
if line.startswith(UpperCAmelCase ):
lowercase = True
elif in_class and line.startswith(UpperCAmelCase ):
lowercase = True
elif in_class and in_func and (line.startswith(UpperCAmelCase ) or line.startswith(UpperCAmelCase )):
lowercase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowercase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowercase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'{spaces * " "}{correct_line}' )
lowercase = lowercase = lowercase = lowercase = False
else:
new_lines.append(UpperCAmelCase )
with open(UpperCAmelCase, '''w''' ) as f:
for line in new_lines:
f.write(UpperCAmelCase )
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase=None )-> str:
"""simple docstring"""
if fail is not None:
with open(UpperCAmelCase, '''r''' ) as f:
lowercase = {l.strip() for l in f.readlines()}
else:
lowercase = None
with open(UpperCAmelCase, '''r''' ) as f:
lowercase = f.readlines()
lowercase = defaultdict(UpperCAmelCase )
for line in correct_lines:
lowercase ,lowercase ,lowercase ,lowercase = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
A_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 604 | 0 |
"""simple docstring"""
def a_ ( number ):
if not isinstance(number , int ):
raise TypeError('Input value must be an \'int\' type' )
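# Shift right until the value is exhausted; the number of shifts is the 1-based
# position of the most significant set bit.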
position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | 0 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
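    # Example invocation (the script filename and paths below are placeholders, not taken from this file):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #     --checkpoint_path ./model.ckpt --original_config_file ./v1-inference.yaml \
    #     --scheduler_type ddim --extract_ema --dump_path ./converted-pipeline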
| 109 | '''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """simple docstring"""

    model_type = """umt5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__( self , vocab_size=25_0112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
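        # e.g. feed_forward_proj="gated-gelu" yields is_gated_act=True with dense_act_fn remapped to
        # "gelu_new" above, while a plain "relu" yields is_gated_act=False and dense_act_fn="relu"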
    @property
    def hidden_size( self ):
        '''simple docstring'''
        return self.d_model

    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self.num_heads

    @property
    def num_hidden_layers( self ):
        '''simple docstring'''
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            common_inputs["""attention_mask"""][1] = """past_encoder_sequence + sequence"""
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )

        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
return 5e-4 | 523 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir , split="eval" ):
    """simple docstring"""
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
    @slow
    def test_run_clm( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['eval_perplexity'] , 100 )
    @slow
    def test_run_summarization( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='test' )
            self.assertGreaterEqual(result['test_rouge1'] , 10 )
            self.assertGreaterEqual(result['test_rouge2'] , 2 )
            self.assertGreaterEqual(result['test_rougeL'] , 7 )
            self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
    @slow
    def test_run_mlm( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['eval_perplexity'] , 42 )
    @slow
    def test_run_t5_mlm( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
    @slow
    def test_run_ner( self ):
        """simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            self.assertGreaterEqual(result['eval_f1'] , 0.3 )
    @slow
    def test_run_qa( self ):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()

        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_f1'] , 30 )
            self.assertGreaterEqual(result['eval_exact'] , 30 )
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Tuple = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["""GLPNFeatureExtractor"""]
__snake_case : Optional[Any] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_glpn"""] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 | 0 |
from math import factorial, pi
def maclaurin_sin(theta, accuracy = 30 ) -> float:
    if not isinstance(theta, (int, float) ):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )

    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )

    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )


def maclaurin_cos(theta, accuracy = 30 ) -> float:
    if not isinstance(theta, (int, float) ):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )

    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )

    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
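# Both functions evaluate truncated Maclaurin series: sin(x) = x - x^3/3! + x^5/5! - ... and
# cos(x) = 1 - x^2/2! + x^4/4! - ...; theta is first reduced modulo 2*pi so the series converges quickly.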
| 416 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()


def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        """simple docstring"""
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )

    def __init__( self , accelerate_config_file , *args ):
        """simple docstring"""
        self._accelerate_config_file = accelerate_config_file

    def run( self ):
        """simple docstring"""
_lowercase = "not installed"
if is_safetensors_available():
import safetensors
_lowercase = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
_lowercase = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
_lowercase = "not installed"
_lowercase = _lowercase = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowercase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__A ):
_lowercase = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowercase = (
"\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(__A , __A )
else f"""\t{accelerate_config}"""
)
_lowercase = "not installed"
_lowercase = "NA"
if is_torch_available():
import torch
_lowercase = torch.__version__
_lowercase = torch.cuda.is_available()
_lowercase = "not installed"
_lowercase = "NA"
if is_tf_available():
import tensorflow as tf
_lowercase = tf.__version__
try:
# deprecated in v2.1
_lowercase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowercase = bool(tf.config.list_physical_devices("GPU" ) )
_lowercase = "not installed"
_lowercase = "not installed"
_lowercase = "not installed"
_lowercase = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
_lowercase = flax.__version__
_lowercase = jax.__version__
_lowercase = jaxlib.__version__
_lowercase = jax.lib.xla_bridge.get_backend().platform
        info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f"""{safetensors_version}""",
"Accelerate version": f"""{accelerate_version}""",
"Accelerate config": f"""{accelerate_config_str}""",
"PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": f"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": f"""{flax_version} ({jax_backend})""",
"Jax version": f"""{jax_version}""",
"JaxLib version": f"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
    @staticmethod
    def format_dict( d ):
        """simple docstring"""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 497 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : str = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )

    return name
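# Worked examples of the rules above: "pretrained.model.blocks.0.attn.qkv.weight" becomes
# "dpt.encoder.layer.0.attn.qkv.weight", and "scratch.refinenet4.out_conv.weight" becomes
# "neck.fusion_stage.layers.0.projection.weight" (refinenet indices 4..1 map to layers 0..3).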
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
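        # the timm checkpoint stacks q, k and v along dim 0 of a single (3 * hidden_size, hidden_size)
        # qkv matrix, which is why the slices above are taken at hidden_size boundaries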
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )

    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )

    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )

    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print('Pushing model to hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
lowerCamelCase_ : List[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 246 | def pancake_sort(arr ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
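# pancake_sort repeatedly flips the prefix ending at the current maximum and then flips that
# maximum to the back of the unsorted region: e.g. pancake_sort([3, 1, 2]) returns [1, 2, 3],
# using O(n^2) comparisons and at most 2n flips.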
| 246 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
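# e.g. a key containing "sequential.3." is rewritten to "layers.1.linear." (3 // 3 = 1), presumably
# because each converted linear layer corresponds to three consecutive modules in CLAP's nn.Sequential.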
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 25 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
        }

        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        kwargs.update(forward_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample

        return sample
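        # full_loop mirrors PNDM's two-phase schedule: Runge-Kutta (PRK) warm-up steps run first,
        # then the plms_timesteps loop applies linear multistep (PLMS) updates that reuse cached residuals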
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )

            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(1_0 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [1, 5, 1_0]:
            self.check_over_forward(time_step=t )

    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 2_7

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            scheduler.set_timesteps(num_inference_steps )

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample

    def test_inference_plms_no_past_residuals( self ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3

    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3

    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.0_1 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.0_1 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 367 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data

    def __iter__( self ):
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator(even_batches=True ):
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"

    return accelerator
def create_dataloader(accelerator: Accelerator , dataset_size: int , batch_size: int , iterable: bool = False ):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )

    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )

    return dl
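# e.g. with two processes, dataset_size=3 and batch_size=1, even_batches=True makes the batch
# sampler pad process 1 with a repeated sample so both processes see two batches of size 1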
def verify_dataloader_batch_sizes(accelerator: Accelerator , dataset_size: int , batch_size: int , process_0_expected_batch_sizes: List[int] , process_1_expected_batch_sizes: List[int] , ):
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False )

    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )

    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False )

    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )

    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
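# join_uneven_inputs lets the process with more data keep stepping after the other process has
# exhausted its batches: here process 0 records batch indices [0, 1] while process 1 records only [0]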
def test_join_raises_warning_for_non_ddp_distributed(accelerator ):
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass

        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )

    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )

    with warnings.catch_warnings():
        warnings.filterwarnings('''ignore''' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )

    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass

        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    accelerator = create_accelerator()

    accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
    test_default_ensures_even_batch_sizes()

    accelerator.print('''Run tests with even_batches disabled''' )
    test_can_disable_even_batches()

    accelerator.print('''Test joining uneven inputs''' )
    test_can_join_uneven_inputs()

    accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
    test_join_can_override_even_batches()

    accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print('''Test join with non DDP distributed raises warning''' )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 720 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__( self , size: int ):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def left( self , idx: int ):
        '''simple docstring'''
        return idx * 2

    def right( self , idx: int ):
        '''simple docstring'''
        return idx * 2 + 1

    def build( self , idx: int , left_element: int , right_element: int , a: list[int] ):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )

    def update( self , idx: int , left_element: int , right_element: int , a: int , b: int , val: int ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True

    def query( self , idx: int , left_element: int , right_element: int , a: int , b: int ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )

    def __str__( self ):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
A =[1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
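    # both update() and query() touch O(log n) nodes: pending range assignments are parked in
    # self.lazy and only pushed down to children when a later operation actually visits them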
| 358 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height(root: Node | None ) -> int:
    '''simple docstring'''
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []
    if root is None:
        return output

    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )

        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []

    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
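# e.g. on the sample tree from make_tree(), get_nodes_from_left_to_right(root, 2) yields [2, 3]
# and the right-to-left variant below yields [3, 2]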
def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []

    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    '''simple docstring'''
    if root is None:
        return []

    output = []

    flag = 0
    height_tree = height(root )

    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    root = make_tree()
    print(F'In-order Traversal: {inorder(root )}' )
    print(F'Pre-order Traversal: {preorder(root )}' )
    print(F'Post-order Traversal: {postorder(root )}' , '''\n''' )

    print(F'Height of Tree: {height(root )}' , '''\n''' )

    print('''Complete Level Order Traversal: ''' )
    print(level_order(root ) , '''\n''' )

    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(root ) + 1 ):
        print(F'Level {level}:' , get_nodes_from_left_to_right(root , level=level ) )

    print('''\nZigZag order Traversal: ''' )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332 |
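As a quick sanity check that is not part of the original module: make_tree builds the five-node tree with 1 at the root, 2 and 3 as its children, and 4 and 5 under 2, so the traversals above should satisfy:

root = make_tree()
assert preorder(root) == [1, 2, 4, 5, 3]
assert inorder(root) == [4, 2, 5, 1, 3]
assert postorder(root) == [4, 5, 2, 3, 1]
assert height(root) == 3
assert list(level_order(root)) == [1, 2, 3, 4, 5]
assert [list(level) for level in zigzag(root)] == [[1], [3, 2], [4, 5]]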
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 332 | 1 |
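The knapsack module imported by these tests is not part of this excerpt. A minimal recursive 0/1 knapsack that is consistent with the k.knapsack(capacity, weights, values, counter) call signature exercised above (the implementation details are our own assumption) could look like this:

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    """Return the maximum value that fits in a knapsack of the given capacity.

    `counter` is the number of items still under consideration; each item may
    be taken at most once (0/1 knapsack).
    """
    if counter == 0 or capacity == 0:
        return 0  # no items left, or no room left
    if weights[counter - 1] > capacity:
        # The item is too heavy for the remaining capacity: skip it.
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )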
"""simple docstring"""
def UpperCAmelCase ( ):
_lowerCAmelCase:Union[str, Any] = 0
for i in range(1 , 1001 ):
total += i**i
return str(__A )[-10:]
if __name__ == "__main__":
print(solution())
| 701 |
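Because only the last ten digits are needed, the sum can also be carried modulo 10**10 with three-argument pow, which keeps every intermediate number small. This variant is our own addition, equivalent for this problem:

MOD = 10**10  # only the last ten digits matter


def solution_mod() -> str:
    total = 0
    for i in range(1, 1001):
        total = (total + pow(i, i, MOD)) % MOD  # modular exponentiation
    return str(total).zfill(10)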
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase ( ):
_lowerCAmelCase:Optional[int] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=snake_case , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=snake_case , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=snake_case )
return parser.parse_args()
def UpperCAmelCase ( ):
_lowerCAmelCase:int = parse_args()
# Import training_script as a module.
_lowerCAmelCase:Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCAmelCase:Optional[int] = script_fpath.stem
_lowerCAmelCase:Any = importlib.import_module(snake_case )
# Patch sys.argv
_lowerCAmelCase:Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 439 | 0 |
"""simple docstring"""
import math
def lowercase__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCAmelCase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
SCREAMING_SNAKE_CASE_ = '''Enter the base and the power separated by a comma: '''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = map(int, input(prompt).split(''','''))
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
SCREAMING_SNAKE_CASE_ = res(xa, ya)
SCREAMING_SNAKE_CASE_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 373 |
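A quick illustration (our own) of why the logarithm trick is useful: comparing 3**1_000_000 against 2**1_600_000 through res() touches only two small floats instead of materializing integers that are hundreds of thousands of digits long:

# 2^1600000 has about 481,648 decimal digits, 3^1000000 only about 477,122,
# so the second power wins despite the smaller base.
assert res(2, 1_600_000) > res(3, 1_000_000)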
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
SCREAMING_SNAKE_CASE_ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
UpperCAmelCase = -1
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] < arr[j]:
UpperCAmelCase = arr[j]
break
result.append(lowerCAmelCase )
return result
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = []
for i, outer in enumerate(lowerCAmelCase ):
UpperCAmelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCAmelCase = inner
break
result.append(lowerCAmelCase )
return result
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = len(lowerCAmelCase )
UpperCAmelCase = []
UpperCAmelCase = [-1] * arr_size
for index in reversed(range(lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCAmelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
SCREAMING_SNAKE_CASE_ = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 373 | 1 |
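The stack-based next_greatest_element pushes and pops each element at most once, so it runs in O(n), while both brute-force variants are O(n^2) in the worst case. The module's own expect list doubles as a check (these asserts are our addition):

assert next_greatest_element_slow(arr) == expect
assert next_greatest_element_fast(arr) == expect
assert next_greatest_element(arr) == expect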
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 706 |
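A small usage sketch (our own; it assumes a transformers release that already ships this model, which landed around v4.25) showing the config round-trip:

from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig(hidden_size=256, num_hidden_layers=4)
assert config.model_type == "roberta-prelayernorm"
config.save_pretrained("./roberta-prelayernorm-config")  # writes config.json
reloaded = RobertaPreLayerNormConfig.from_pretrained("./roberta-prelayernorm-config")
assert reloaded.hidden_size == 256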
"""simple docstring"""
import numpy as np
class lowercase:
'''simple docstring'''
def __init__( self: Any ):
'''simple docstring'''
_snake_case : Tuple = (0, 0)
_snake_case : Any = None
_snake_case : str = 0
_snake_case : int = 0
_snake_case : int = 0
def __eq__( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
return self.position == cell.position
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
print(self.position )
class lowercase:
'''simple docstring'''
def __init__( self: Optional[int], a_: Optional[int]=(5, 5) ):
'''simple docstring'''
_snake_case : int = np.zeros(a_ )
_snake_case : Tuple = world_size[0]
_snake_case : Optional[Any] = world_size[1]
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
print(self.w )
def UpperCamelCase_ ( self: Dict, a_: Dict ):
'''simple docstring'''
_snake_case : Optional[int] = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_snake_case : List[Any] = cell.position[0]
_snake_case : Union[str, Any] = cell.position[1]
_snake_case : Union[str, Any] = []
for n in neughbour_cord:
_snake_case : Union[str, Any] = current_x + n[0]
_snake_case : Optional[int] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_snake_case : List[Any] = Cell()
_snake_case : int = (x, y)
_snake_case : List[str] = cell
neighbours.append(a_ )
return neighbours
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Optional[int] = []
_snake_case : List[str] = []
_open.append(snake_case__ )
while _open:
_snake_case : Optional[Any] = np.argmin([n.f for n in _open] )
_snake_case : Optional[int] = _open[min_f]
_closed.append(_open.pop(snake_case__ ) )
if current == goal:
break
for n in world.get_neigbours(snake_case__ ):
for c in _closed:
if c == n:
continue
_snake_case : Optional[int] = current.g + 1
_snake_case , _snake_case : Tuple = n.position
_snake_case , _snake_case : Union[str, Any] = goal.position
_snake_case : List[str] = (ya - ya) ** 2 + (xa - xa) ** 2
_snake_case : Tuple = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(snake_case__ )
_snake_case : Tuple = []
while current.parent is not None:
path.append(current.position )
_snake_case : Tuple = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
A_ = Gridworld()
# Start position and goal
A_ = Cell()
A_ = (0, 0)
A_ = Cell()
A_ = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
A_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
A_ = 1
print(world.w)
| 609 |
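Re-scanning the open list with np.argmin makes each A* iteration linear in the frontier size. A common refinement (our suggestion, not part of the original) keeps the frontier in a binary heap so the cheapest cell pops in O(log n); the counter breaks ties so Cell objects are never compared directly:

import heapq
import itertools

tie_breaker = itertools.count()  # monotonically increasing tie-break key


def push(open_heap, cell):
    heapq.heappush(open_heap, (cell.f, next(tie_breaker), cell))


def pop_cheapest(open_heap):
    _f, _idx, cell = heapq.heappop(open_heap)
    return cell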
"""simple docstring"""
import argparse
from collections import defaultdict
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : Dict = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(snake_case__ , """r""" ) as f:
_snake_case : Optional[Any] = f.readlines()
_snake_case : Tuple = F"class {class_name}("
_snake_case : Tuple = F"{4 * ' '}def {test_name}("
_snake_case : Optional[int] = F"{8 * ' '}{correct_line.split()[0]}"
_snake_case : Union[str, Any] = F"{16 * ' '}{correct_line.split()[0]}"
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : Optional[Any] = False
_snake_case : Optional[Any] = False
_snake_case : List[Any] = 0
_snake_case : List[Any] = 0
_snake_case : List[str] = []
for line in lines:
if line.startswith(snake_case__ ):
_snake_case : Union[str, Any] = True
elif in_class and line.startswith(snake_case__ ):
_snake_case : str = True
elif in_class and in_func and (line.startswith(snake_case__ ) or line.startswith(snake_case__ )):
_snake_case : Optional[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_snake_case : Optional[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_snake_case : str = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
_snake_case : int = False
else:
new_lines.append(snake_case__ )
with open(snake_case__ , """w""" ) as f:
for line in new_lines:
f.write(snake_case__ )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Any=None ):
"""simple docstring"""
if fail is not None:
with open(snake_case__ , """r""" ) as f:
_snake_case : List[Any] = {l.strip() for l in f.readlines()}
else:
_snake_case : Union[str, Any] = None
with open(snake_case__ , """r""" ) as f:
_snake_case : Union[str, Any] = f.readlines()
_snake_case : List[Any] = defaultdict(snake_case__ )
for line in correct_lines:
_snake_case , _snake_case , _snake_case , _snake_case : List[Any] = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
A_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 609 | 1 |
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 719 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed to the processor under test."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 149 | 0 |
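Outside of the test harness, a processor like this is the single front door for both modalities. A sketch (the checkpoint name is a real public BLIP checkpoint; the image path is a placeholder of our choosing):

from PIL import Image

from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open("cat.png")  # any RGB image on disk
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # ['pixel_values', 'input_ids', 'attention_mask']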
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 78 |


import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier, trained by solving Wolfe's dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
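A tiny usage sketch on a linearly separable toy set (the example points and the expected predictions are our own; small problems like this converge quickly through the dual solver):

import numpy as np

points = [np.asarray(p, dtype=float) for p in [(0.0, 1.0), (0.0, 2.0), (3.0, 1.0), (3.0, 2.0)]]
labels = np.asarray([1, 1, -1, -1])

svc = SVC(kernel="linear", regularization=10)
svc.fit(points, labels)
print(svc.predict(np.asarray([0.5, 1.5])))  # expected: 1 (near the first cluster)
print(svc.predict(np.asarray([2.5, 1.5])))  # expected: -1 (near the second cluster)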
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block (no attention)."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block (no attention)."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Cross-attention 2D mid block: alternating resnet and attention layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 609 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 609 | 1 |
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 18 |
def has_unique_chars(input_str: str) -> bool:
    """Return True if no character occurs twice in input_str, using a bitmap over code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 469 | 0 |
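For comparison (our addition, not in the original), the same check with a set is shorter and avoids building one huge integer when the string contains high code points; both passes are linear in the input length:

def has_unique_chars_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)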
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    r"""Generic feature extractor class instantiated through
    [`AutoFeatureExtractor.from_pretrained`]; it cannot be constructed directly."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 184 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 184 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 35 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
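# The value returned above is the built-in potential of a p-n junction,
# V_bi = (kT/q) * ln(N_d * N_a / n_i**2); dividing by the electron-volt constant
# converts joules to volts. Illustrative check (silicon-like magnitudes):
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# is roughly 0.81 V at T = 300 K.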
if __name__ == "__main__":
import doctest
doctest.testmod()
| 169 | 0 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 624 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class TFGPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            in_tensor = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(in_tensor)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(in_tensor)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                in_tensor = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(in_tensor, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 624 | 1 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
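# The while-loop in decimal_to_fraction is the Euclidean algorithm: the final
# divisor equals gcd(numerator, denominator), so the returned pair is fully
# reduced, e.g. decimal_to_fraction(6.25) == (25, 4).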
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(8_9.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 193 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        embed_dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        # NOTE: the parameter names were reconstructed from the attribute assignments
        # below; the exact position of `embed_dim` and `dim` in the signature is inferred.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fp16 = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 711 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
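# Example: length_conversion(1, "kilometer", "meter") == 1000.0, since the
# exponent difference is 3 - 0 = 3 and the value is scaled by 10**3.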
if __name__ == "__main__":
from doctest import testmod
testmod()
| 345 | 0 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, so next_row keeps the finished row's values while current_row is reused
        next_row = current_row.copy()
    return largest_square_area
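# All four variants compute the side length of the largest square of 1s;
# for the 2x2 all-ones matrix used in the demo below, each returns 2.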
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 169 |
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [465, 287, 265, 631, 842] )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowerCAmelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
# fmt: off
self.assertListEqual(
lowerCAmelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCAmelCase = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 169 | 1 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
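# Example behaviour of the helpers above:
#   camelcase_to_snakecase("SomeDatasetName") -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name") -> "SomeDatasetName"
#   filename_prefix_for_split("dataset", "train") -> "dataset-train"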
| 473 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 473 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were obfuscated in this copy; the six names below
# are an assumption based on the dummy objects diffusers generates for the
# ["torch", "transformers", "onnx"] backend combination.
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 93 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the string into word groups, discarding punctuation."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 93 | 1 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 704 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
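# Example: the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum to 28,
# so sum_of_proper_divisors(28) == 28 (28 is a perfect number).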
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 34 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 104 | 0 |
def solution(n: int = 100) -> int:
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
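# Example: solution(5) == 15 -- the distinct values of a**b for 2 <= a, b <= 5
# (duplicates such as 2**4 == 4**2 == 16 are removed by the set).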
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 34 | from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
    @property
    def atol_for_validation(self):
        return 1E-5
    @property
    def default_onnx_opset(self):
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40):
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))
return inputs
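# Added usage sketch (not part of the original module; shown as comments only
# because this module relies on package-relative imports). A default config and
# its ONNX config expose the export axes defined above:
#
#     config = LayoutLMv3Config()
#     onnx_config = LayoutLMv3OnnxConfig(config, task="default")
#     print(config.max_2d_position_embeddings)   # 1024
#     print(list(onnx_config.inputs.keys()))     # input_ids, bbox, attention_mask, pixel_values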
| 34 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
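# Added sanity check (not in the original script): p(3) = 3 and p(7) = 15 are
# the standard integer-partition counts, so the memo table above can be
# spot-checked without user input.
def _check_partition_small_values() -> None:
    assert partition(3) == 3
    assert partition(7) == 15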
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 369 | 0 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
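# Added cross-check (not in the original file): the rational form above is
# algebraically identical to tanh(x), so it should agree with np.tanh
# everywhere up to floating-point error.
def _check_against_numpy_tanh() -> None:
    xs = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    assert np.allclose(tangent_hyperbolic(xs), np.tanh(xs))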
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
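# Added usage sketch (not part of the original module; comments only because
# this module uses package-relative imports). The checkpoint name
# "openai/whisper-tiny" is an assumption; any Whisper checkpoint with a
# matching feature extractor and tokenizer behaves the same way:
#
#     import numpy as np
#     from transformers import WhisperProcessor
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     silence = np.zeros(16_000, dtype=np.float32)  # one second at 16 kHz
#     features = processor(audio=silence, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="hello world")
#     print(features["input_features"].shape)      # log-mel features for the model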
| 414 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
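# Illustrative usage (added; not part of the original import shim). The
# checkpoint name is an assumption based on the publicly released UnCLIP
# weights:
#
#     from diffusers import UnCLIPPipeline
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of an astronaut").images[0]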
| 628 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # the fused timm qkv tensor is split into separate query/key/value entries
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
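# Example invocation (added for illustration; not part of the original script).
# The filename is an assumption - use whatever this script is saved as:
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2-tiny-converted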
| 628 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def _lowercase ( UpperCamelCase__ : List[Any], UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : int = 8, UpperCamelCase__ : int = 1024, UpperCamelCase__ : List[Any]="val", UpperCamelCase__ : int=None, UpperCamelCase__ : str=False, UpperCamelCase__ : int="summarization", UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : List[Any]=1, UpperCamelCase__ : Dict = None, UpperCamelCase__ : Optional[int]="", **UpperCamelCase__ : str, ):
__A : Dict = str(UpperCamelCase__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl', rank=UpperCamelCase__ )
__A : Union[str, Any] = Path(UpperCamelCase__ )
__A : Optional[int] = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(UpperCamelCase__ )
__A : Dict = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).cuda()
if fpaa:
__A : Optional[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(UpperCamelCase__, UpperCamelCase__ ) # update config with task specific params
__A : Any = generate_kwargs.pop('num_beams', model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__A : int = num_return_sequences
__A : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
__A : Union[str, Any] = tokenizer.model_max_length
if prefix is None:
__A : List[Any] = prefix or getattr(model.config, 'prefix', '' ) or ''
__A : Tuple = SeqaSeqDataset(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, max_target_length=1024, type_path=UpperCamelCase__, n_obs=UpperCamelCase__, prefix=UpperCamelCase__, **UpperCamelCase__, )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__A : Any = ds.make_sortish_sampler(UpperCamelCase__, distributed=UpperCamelCase__, add_extra_examples=UpperCamelCase__, shuffle=UpperCamelCase__ )
__A : Union[str, Any] = DataLoader(UpperCamelCase__, sampler=UpperCamelCase__, batch_size=UpperCamelCase__, collate_fn=ds.collate_fn )
__A : Tuple = []
for batch in tqdm(UpperCamelCase__ ):
__A : Any = model.generate(
input_ids=batch['input_ids'].to(model.device ), attention_mask=batch['attention_mask'].to(model.device ), num_return_sequences=UpperCamelCase__, num_beams=UpperCamelCase__, **UpperCamelCase__, )
__A : Dict = tokenizer.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__, clean_up_tokenization_spaces=UpperCamelCase__ )
__A : List[str] = batch['ids']
if num_return_sequences > 1:
__A : str = chunks(UpperCamelCase__, UpperCamelCase__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(UpperCamelCase__ ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(UpperCamelCase__, UpperCamelCase__ )
return results, sampler.num_replicas
def _lowercase ( ):
__A : Optional[Any] = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir', type=UpperCamelCase__, help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name', type=UpperCamelCase__, help='like facebook/bart-large-cnn,t5-base, etc.', default='sshleifer/distilbart-xsum-12-3', )
parser.add_argument('--save_dir', type=UpperCamelCase__, help='where to save', default='tmp_gen' )
parser.add_argument('--max_source_length', type=UpperCamelCase__, default=UpperCamelCase__ )
parser.add_argument(
'--type_path', type=UpperCamelCase__, default='test', help='which subset to evaluate typically train/val/test' )
parser.add_argument('--task', type=UpperCamelCase__, default='summarization', help='used for task_specific_params + metrics' )
parser.add_argument('--bs', type=UpperCamelCase__, default=8, required=UpperCamelCase__, help='batch size' )
parser.add_argument(
'--local_rank', type=UpperCamelCase__, default=-1, required=UpperCamelCase__, help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs', type=UpperCamelCase__, default=UpperCamelCase__, required=UpperCamelCase__, help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences', type=UpperCamelCase__, default=1, required=UpperCamelCase__, help='How many sequences to return' )
parser.add_argument(
'--sync_timeout', type=UpperCamelCase__, default=600, required=UpperCamelCase__, help='How long should master process wait for other processes to finish.', )
parser.add_argument('--src_lang', type=UpperCamelCase__, default=UpperCamelCase__, required=UpperCamelCase__ )
parser.add_argument('--tgt_lang', type=UpperCamelCase__, default=UpperCamelCase__, required=UpperCamelCase__ )
parser.add_argument(
'--prefix', type=UpperCamelCase__, required=UpperCamelCase__, default=UpperCamelCase__, help='will be added to the begininng of src examples' )
parser.add_argument('--fp16', action='store_true' )
parser.add_argument('--debug', action='store_true' )
__A : int = time.time()
__A ,__A : int = parser.parse_known_args()
__A : List[str] = parse_numeric_n_bool_cl_kwargs(UpperCamelCase__ )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
__A : List[str] = Path(args.save_dir + '_tmp' )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) # this handles locking.
__A : Optional[Any] = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__A : List[str] = {}
if args.src_lang is not None:
__A : Dict = args.src_lang
if args.tgt_lang is not None:
__A : Optional[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=UpperCamelCase__ )
__A ,__A : List[Any] = eval_data_dir(
args.data_dir, UpperCamelCase__, args.model_name, type_path=args.type_path, bs=args.bs, fpaa=args.fpaa, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=UpperCamelCase__, **UpperCamelCase__, )
if args.local_rank <= 0:
__A : Tuple = Path(args.save_dir )
save_dir.mkdir(exist_ok=UpperCamelCase__ )
__A : Dict = gather_results_from_each_node(UpperCamelCase__, UpperCamelCase__, args.sync_timeout )
__A : Union[str, Any] = combine_partial_results(UpperCamelCase__ )
if args.num_return_sequences > 1:
__A : Any = save_dir.joinpath('pseudolabel_results.json' )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(UpperCamelCase__, UpperCamelCase__ )
return
__A : int = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(UpperCamelCase__ ) as f:
__A : str = [x.rstrip() for x in f.readlines()][: len(UpperCamelCase__ )]
# Calculate metrics, save metrics, and save _generations.txt
__A : Tuple = 'translation' in args.task
__A : Union[str, Any] = calculate_bleu if calc_bleu else calculate_rouge
__A : Optional[Any] = 'bleu' if calc_bleu else 'rouge'
__A : Dict = score_fn(UpperCamelCase__, UpperCamelCase__ )
__A : Any = len(UpperCamelCase__ )
__A : Tuple = time.time() - start_time
__A : Tuple = round(runtime / metrics['n_obs'], 4 )
__A : int = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__A : List[Any] = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(UpperCamelCase__, UpperCamelCase__, indent=UpperCamelCase__ )
print(UpperCamelCase__ )
write_txt_file(UpperCamelCase__, save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(UpperCamelCase__, save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(UpperCamelCase__ )
def combine_partial_results( partial_results ):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
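# Example launch command (added; not part of the original script). The module
# filename is an assumption based on how this script is usually saved:
#
#     python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#         --model_name sshleifer/distilbart-xsum-12-3 \
#         --data_dir xsum --save_dir xsum_generations --bs 8 --fp16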
| 540 | 0 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file: str) -> Dict:
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class a_ (PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f'[unused{i}]'
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
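# Added illustration (not part of the original file): the fairseq/spm alignment
# above reduces to "ids 0-14 are reserved for special and [unused] tokens, and
# every real sentencepiece id is shifted up by fairseq_offset (12)". A tiny
# standalone restatement of that mapping:
def _demo_fairseq_alignment(spm_id: int, fairseq_offset: int = 12) -> int:
    # sentencepiece returns 0 for unknown pieces, which maps to "[UNK]" (id 3)
    return spm_id + fairseq_offset if spm_id else 3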
| 384 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')
def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'
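# Added helper (not in the original script): build_tree() above is interactive,
# so this constructs a small fixed tree for exercising the traversals
# programmatically. pre_order prints 1,2,4,5,3, and in_order prints 4,2,5,1,3,
def make_demo_tree() -> TreeNode:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    return root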
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 638 | 0 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"""`{prop}` does not exist""")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"""`{name} value {idx} expected, but was {getattr(config, name)}""")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"""`{name} value {idx} expected, but was {getattr(config, name)}""")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = '\n'.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values])
            raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
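# Added usage sketch (not part of the original file; comments only because this
# module uses a package-relative import). BertConfig is a stand-in example; any
# PretrainedConfig subclass is wired up the same way:
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()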
| 714 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
snake_case__ =defaultdict(UpperCamelCase_ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
snake_case__ =mst(UpperCamelCase_ )
snake_case__ =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
snake_case__ =tuple(answer[:2] )
snake_case__ =tuple(edge[::-1] )
assert edge in result or reverse in result
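# Added invariant (not in the original test): the expected MST above has total
# weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, a quick aggregate check to run
# alongside the per-edge membership assertions.
def test_expected_mst_weight() -> None:
    expected_total = sum(weight for _, _, weight in [
        [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4],
        [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9],
    ])
    assert expected_total == 37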
| 581 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
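# Added usage sketch (not part of the original module; comments only because
# this module uses package-relative imports): instantiate the default config
# and inspect the per-stage geometry plus the ONNX validation tolerance.
#
#     config = SegformerConfig()
#     print(config.hidden_sizes)   # [32, 64, 160, 256]
#     print(config.depths)         # [2, 2, 2, 2]
#     print(SegformerOnnxConfig(config).atol_for_validation)   # 1e-04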
| 59 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec("""fairseq""") is not None

UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""

REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec("""transformers""") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        doctest_case = '[...]'  # noqa: F841
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = '[...]'  # noqa: F841
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def _a ( __UpperCamelCase : Tuple ):
    def load_from_checkpoint(checkpoint_path ):
        class Model:
            def predict( self , data , *args , **kwargs ):
                """simple docstring"""
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model which is supposed to download a comet model checkpoint
    # mock load_from_checkpoint which is supposed to load the model from that checkpoint
with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    error_message = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError ,match=re.escape(error_message ) ):
        metric.compute(predictions=[] ,references=[] ,scheme=wrong_scheme )
| 233 | 0 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    return sum(
        number
        for number in range(1000 , 100_0000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
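# Worked check (added for illustration): 4150 = 4**5 + 1**5 + 5**5 = 1024 + 1 + 3125,
# so digits_fifth_powers_sum(4150) == 4150 and solution() counts it.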
| 475 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __magic_name__ ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
@property
    def dummy_input( self : List[str] ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self : Union[str, Any] ):
'''simple docstring'''
        init_dict = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 475 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path : str , openai_config_file : str , pytorch_dump_folder_path : str ) -> None:
# Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path ,'w' ,encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
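    # Example invocation (script name and paths are illustrative, not from the original source):
    #   python convert_openai_original_tf_checkpoint_to_pytorch.py \
    #       --openai_checkpoint_folder_path /path/to/openai_checkpoint \
    #       --pytorch_dump_folder_path /path/to/output_dir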
| 286 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
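    # Note (added for clarity): with the lazy structure above, importing e.g. SqueezeBertModel
    # from this module defers the heavy torch import until the attribute is first accessed,
    # which is the usual behavior of transformers' _LazyModule.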
| 286 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        '''simple docstring'''
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair" , None )
        return_tensors = kwargs.pop("return_tensors" , None )
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get("input_ids" )
            encoded_attention_mask = encoded_candidates.get("attention_mask" )
            encoded_token_type_ids = encoded_candidates.get("token_type_ids" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
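# Usage sketch (added for illustration; the checkpoint name is taken from the map above):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates([["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt")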
| 708 |
def dodecahedron_surface_area( edge : float ) -> float:
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be positive." )
    return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float ) -> float:
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be positive." )
    return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
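# Quick sanity check (values rounded, added for illustration): for edge = 5,
#   dodecahedron_surface_area(5) = 3 * sqrt(25 + 10*sqrt(5)) * 25 is about 516.14
#   dodecahedron_volume(5)       = (15 + 7*sqrt(5)) / 4 * 125   is about 957.89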
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str ) -> None:
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F'''bert/{name}'''
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('-' , '_' ) + '.ckpt' ) )
def main(raw_args=None ) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , required=True , help='model name e.g. bert-base-uncased' )
    parser.add_argument(
        '--cache_dir' , type=str , default=None , required=False , help='Directory containing pytorch model' )
    parser.add_argument('--pytorch_model_path' , type=str , required=True , help='/path/to/<pytorch-model-name>.bin' )
    parser.add_argument('--tf_cache_dir' , type=str , required=True , help='Directory in which to save tensorflow model' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
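# Example invocation (file name and paths are illustrative, not from the original source):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt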
if __name__ == "__main__":
    main()
| 57 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self ,image_processor ,tokenizer ) -> None:
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__(self ,images : ImageInput = None ,text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,add_special_tokens : bool = True ,padding : Union[bool, str, PaddingStrategy] = False ,truncation : Union[bool, str, TruncationStrategy] = None ,max_length : Optional[int] = None ,stride : int = 0 ,pad_to_multiple_of : Optional[int] = None ,return_attention_mask : Optional[bool] = None ,return_overflowing_tokens : bool = False ,return_special_tokens_mask : bool = False ,return_offsets_mapping : bool = False ,return_token_type_ids : bool = False ,return_length : bool = False ,verbose : bool = True ,return_tensors : Optional[Union[str, TensorType]] = None ,**kwargs ,) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images ,return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode(self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode(self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args ,**kwargs )
@property
    def model_input_names(self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
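# Usage sketch (added for illustration; the checkpoint name is an assumption):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")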
| 535 | 0 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) ->tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
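# Mass action law check (added for illustration): n * p = n_i**2, so
# carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# returns ("intrinsic_conc", 50.0) because sqrt(25 * 100) = 50.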
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 210 |
"""simple docstring"""
def is_contains_unique_chars( input_str: str ) ->bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
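# Examples (added for illustration): is_contains_unique_chars("abcde") -> True,
# is_contains_unique_chars("hello") -> False (the letter "l" repeats).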
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 210 | 1 |
import requests
def send_slack_message( message_body: str , slack_url: str ) -> None:
    '''simple docstring'''
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 306 |
g = 9.8_06_65
def archimedes_principle( fluid_density: float , volume: float , gravity: float = g ) -> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
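# Example (added for illustration): archimedes_principle(fluid_density=997, volume=0.5)
# = 997 * 9.80665 * 0.5, roughly 4888.6 N (buoyant force on 0.5 m^3 submerged in water).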
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 306 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    # Remove columns that are populated exclusively by pad_token_id
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
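# Example (added for illustration): with pad_token_id=0 and
# input_ids = tensor([[5, 4, 0], [5, 0, 0]]), trim_batch drops the all-pad last
# column and returns tensor([[5, 4], [5, 0]]).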
class Seq2SeqDataset( Dataset ):
"""simple docstring"""
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> None:
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ) -> int:
return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens( data_file ) -> List[int]:
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path):
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, '''git_log.json'''))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, '''w''') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, '''wb''') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    def remove_articles(text):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
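# Example (added for illustration): f1_score("the cat sat", "cat sat") == 1.0,
# since normalize_answer drops the article "the" before token overlap is computed.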
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith('''rag''')
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 93 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
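# Note (added for illustration): with no gates applied the qubit stays in |0>,
# so single_qubit_measure(1, 1) is expected to return counts like {"0": 1000}.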
if __name__ == "__main__":
    print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 93 | 1 |