"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__A = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
__A = """hopper-medium-v2"""
__A = gym.make(env_name)
__A = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
__A = env.reset()
__A = 0
__A = 0
__A = 1000
__A = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__A = pipeline(obs, planning_horizon=32)
# execute action in environment
__A , __A , __A , __A = env.step(denorm_actions)
__A = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__A = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
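
    # Hedged post-processing sketch (not part of the original script): the
    # observations collected in `rollout` can be stacked and saved for offline
    # rendering. numpy is assumed available, since gym observations are arrays.
    import numpy as np

    np.save(f"{env_name}_rollout.npy", np.stack(rollout))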


import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}


import datasets
from jiwer import compute_measures


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N = S + D + C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system, with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
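

if __name__ == "__main__":
    # Hedged sanity check of the WER formula from the docstring, computed by
    # hand with a word-level Levenshtein alignment. The helper below is a
    # local sketch, not part of jiwer.
    def _word_levenshtein(ref: list, hyp: list) -> int:
        dist = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
        for i in range(len(ref) + 1):
            dist[i][0] = i
        for j in range(len(hyp) + 1):
            dist[0][j] = j
        for i in range(1, len(ref) + 1):
            for j in range(1, len(hyp) + 1):
                cost = 0 if ref[i - 1] == hyp[j - 1] else 1
                dist[i][j] = min(dist[i - 1][j] + 1, dist[i][j - 1] + 1, dist[i - 1][j - 1] + cost)
        return dist[-1][-1]

    reference = "this is the reference".split()
    prediction = "this is the prediction".split()
    # WER = (S + D + I) / N: one substitution over four reference words.
    assert _word_levenshtein(reference, prediction) / len(reference) == 0.25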


import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)


def solution(length: int = 50) -> int:
    """
    Count the ways a row of the given length can be filled with red blocks of
    length at least three, any two blocks separated by at least one grey
    square (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
A_ : Union[str, Any] ={
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def SCREAMING_SNAKE_CASE_ ( snake_case : str = "dhaka" , snake_case : int = 5 )-> int:
_lowerCamelCase = min(snake_case , 50 ) # Prevent abuse!
_lowerCamelCase = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
_lowerCamelCase = requests.get('https://www.google.com/search' , params=snake_case , headers=snake_case )
_lowerCamelCase = BeautifulSoup(html.text , 'html.parser' )
_lowerCamelCase = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
_lowerCamelCase = json.dumps(snake_case )
_lowerCamelCase = json.loads(snake_case )
_lowerCamelCase = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , snake_case , )
if not matched_google_image_data:
return 0
_lowerCamelCase = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(snake_case ) , )
_lowerCamelCase = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , snake_case , )
for index, fixed_full_res_image in enumerate(snake_case ):
if index >= max_images:
return index
_lowerCamelCase = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
_lowerCamelCase = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
_lowerCamelCase = urllib.request.build_opener()
_lowerCamelCase = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(snake_case )
_lowerCamelCase = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
urllib.request.urlretrieve( # noqa: S310
snake_case , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
A_ : Any =download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print("""Please provide a search term.""")
raise


import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # The expected data below is written as list-building expressions that
        # are element-for-element equivalent to the source's flattened literals
        # (padding run lengths verified against the attention masks).
        expected_encoding = {
            "attention_mask": [[1] * 101, [1] * 31 + [0] * 70, [1] * 12 + [0] * 89],
            "input_ids": [
                [2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515,
                 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58,
                 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5,
                 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19,
                 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3],
                [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860,
                 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3] + [0] * 70,
                [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3] + [0] * 89,
            ],
            "token_type_ids": [[0] * 101, [0] * 101, [0] * 101],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )


import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
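

# Hedged migration sketch (comment only; `model`, `training_args` and
# `train_dataset` are placeholders, not defined in this module):
#
#     from transformers import Trainer
#
#     trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
#     trainer.train()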


def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be turned into abbreviation `b` by
    capitalizing zero or more of its lowercase letters and deleting all the
    remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
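
    # Hedged worked example: in "daBcd" -> "ABC", capitalize 'a' and 'c' and
    # delete the remaining lowercase 'd's; "dBcd" fails because the uppercase
    # 'B' can neither match 'A' nor be deleted.
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False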


import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
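
    # Hedged check of the corner arithmetic in `solution`: for current side
    # length j, the next ring's prime candidates are j^2 + (j+1), j^2 + 2(j+1)
    # and j^2 + 3(j+1); the fourth corner (j+2)^2 is a perfect square and lies
    # outside the exclusive `range` bound. For j = 3 that gives 13, 17, 21.
    assert list(range(3 * 3 + 3 + 1, (3 + 2) * (3 + 2), 3 + 1)) == [13, 17, 21]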


import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, used so
    the BPE vocabulary can avoid whitespace and control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where the word is represented as
    a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
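

# Hedged illustration (comment only, to avoid import-time side effects in a
# library module): get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.
# The BPE loop in `bpe` below repeatedly merges the lowest-ranked pair drawn
# from this set until no ranked pair remains.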


class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, using byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids


import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3


from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def a__ ( snake_case__ ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
lowerCamelCase = False
if num < 0:
lowerCamelCase = True
lowerCamelCase = -num
lowerCamelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
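
    # Hedged spot checks (function name restored from the usage above).
    print(decimal_to_binary(0))  # 0b0
    print(decimal_to_binary(40))  # 0b101000
    print(decimal_to_binary(-40))  # -0b101000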
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase : Dict = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
lowerCamelCase = len(snake_case__ )
for i in range(snake_case__ ):
lowerCamelCase = -1
for j in range(i + 1 , snake_case__ ):
if arr[i] < arr[j]:
lowerCamelCase = arr[j]
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
for i, outer in enumerate(snake_case__ ):
lowerCamelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCamelCase = inner
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = len(snake_case__ )
lowerCamelCase = []
lowerCamelCase = [-1] * arr_size
for index in reversed(range(snake_case__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCamelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase : Dict = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
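# Tiny worked example (editor's addition): for [2, 1, 3] the next greater
# elements are [3, 3, -1]; all three implementations above must agree.
if __name__ == "__main__":
    sample = [2.0, 1.0, 3.0]
    assert (
        next_greatest_element(sample)
        == next_greatest_element_fast(sample)
        == next_greatest_element_slow(sample)
        == [3.0, 3.0, -1.0]
    )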
| 533 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 95 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 271 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so that every node of the tree
    holds exactly one coin (each move shifts one coin between adjacent nodes).

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> distribute_coins(None)
    0
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: each call returns (moves in subtree, coin excess passed up)
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
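# Worked example (editor's addition): with 3 coins on the right leaf and none
# elsewhere, two coins travel to the root and one continues on to the left
# leaf, for 3 moves in total.
if __name__ == "__main__":
    assert distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3))) == 3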
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize the target transcription before scoring."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
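# Example invocation (editor's addition; the script filename and the model /
# dataset identifiers are illustrative placeholders, but the flags all exist
# in the argument parser above):
#   python eval.py --model_id <your-model> \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --log_outputs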
| 676 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
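# Example value (editor's addition; version numbers are illustrative):
#   "diffusers/0.16.0; python/3.10.6; session_id/19a2...; torch/2.0.0; is_ci/true"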
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
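# Behavior sketch (editor's addition; the path is illustrative): given a
# resolved cache path such as ".../snapshots/<commit>/unet/config.json", the
# function returns the "<commit>" folder name when it looks like a git commit
# hash, and None when no snapshot segment is present.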
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
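# Behavior sketch (editor's addition): the variant is spliced in right before
# the file extension, e.g.
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   == "diffusion_pytorch_model.fp16.bin"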
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 39 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 39 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
        do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True, padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
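# Usage sketch (editor's addition; default construction shown, no checkpoint
# assumed): one second of silence at 16 kHz yields one batch of 80-dim
# filter-bank frames.
#   extractor = Speech2TextFeatureExtractor()
#   feats = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000,
#                     padding=True, return_tensors="np")
#   feats["input_features"].shape  # -> (1, num_frames, 80)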
| 718 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, default_expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
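# Note (editor's addition): `text_path` and `tmp_path` in the tests above are
# pytest fixtures; `text_path` points at a small text file (four lines, hence
# the `num_rows == 4` checks) provided by the test suite's conftest.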
| 243 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 86 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 135 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    """Generate a random hand, an opponent hand, and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    # Five-high straights must sort below six-high straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem 54 of Project Euler: how many of the 1000 hands does Player 1 win?
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128,
        rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True,
        text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The exported inputs (and their dynamic axes) depend on the task.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # OCR must be off so the dummy boxes below are actually used.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))

        return inputs
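
# A quick, self-contained sanity check of the config class above (a sketch;
# the override values are arbitrary): unknown kwargs flow through to
# PretrainedConfig, and layout-specific fields keep their defaults.
if __name__ == "__main__":
    cfg = LayoutLMv3Config(hidden_size=384, num_hidden_layers=6)
    assert cfg.model_type == "layoutlmv3"
    assert cfg.hidden_size == 384
    assert cfg.max_2d_position_embeddings == 1024  # size of the coordinate vocabulary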
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
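
# Smoke test for the configuration above (a sketch; the horizon is arbitrary):
# context_length falls back to the prediction horizon, and feature_size is
# derived from the lag indices plus the extra scalar features.
if __name__ == "__main__":
    cfg = AutoformerConfig(prediction_length=24)
    assert cfg.context_length == 24
    assert cfg.feature_size == 1 * len(cfg.lags_sequence) + cfg._number_of_features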
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling random points in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Mean-value Monte Carlo estimate of the integral of f over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # quarter circle of radius 2: the area under sqrt(4 - x^2) on [0, 2] is pi
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
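
# Quick demo of the estimators above (a sketch; the sample counts are
# arbitrary): with enough samples, the quarter-circle integral converges to
# pi and the line integral on [0, 1] to 0.5.
if __name__ == "__main__":
    pi_estimator_using_area_under_curve(100_000)
    area_under_line_estimator_check(100_000)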
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # BART-style: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Tokens outside the reduced monolingual vocab map to <unk>.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
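
# A self-contained illustration of the special-token layout produced by
# build_inputs_with_special_tokens above (BART-style). The ids are made up:
# 0 stands in for <s>, 2 for </s>.
def _layout(ids_0, ids_1=None, cls_id=0, sep_id=2):
    if ids_1 is None:
        return [cls_id] + ids_0 + [sep_id]
    return [cls_id] + ids_0 + [sep_id, sep_id] + ids_1 + [sep_id]


assert _layout([5, 6]) == [0, 5, 6, 2]
assert _layout([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]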
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
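
# Example invocation (a sketch; the dataset config and output path are
# illustrative, the flags come from the dataclasses and TrainingArguments above):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish \
#       --num_train_epochs 5 \
#       --do_train --do_eval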
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
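
# Example invocation via python-fire (the file paths are illustrative):
#   python calculate_rouge_path.py preds.txt refs.txt --save_path rouge_scores.json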
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] in place via "multiply and surrender" recursion."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
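
# Demo of the in-place behavior (a sketch): slowsort is deliberately
# inefficient -- it recurses on both halves *and* on all but the last element,
# making it a pedagogical counterexample to divide and conquer.
if __name__ == "__main__":
    data = [5, 2, 4, 1, 3]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]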
def sum_of_digits(n: int) -> int:
    """Iteratively peel off the last decimal digit and accumulate."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode characters, so the
    byte-level BPE never has to deal with unseen symbols.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) adjacent pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
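
# A standalone sketch of the LED-specific padding rule implemented above: the
# global_attention_mask is extended with -1 ("local attention") so it keeps
# the same length as input_ids after padding.
def _pad_global_attention_mask(mask, target_length, padding_side="right"):
    difference = target_length - len(mask)
    return mask + [-1] * difference if padding_side == "right" else [-1] * difference + mask


assert _pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]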
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator for a forward-like method: if the module carries an accelerate
    hook (`_hf_hook`), run its `pre_forward` before the method so offloaded
    weights are moved onto the execution device first.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
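
# Minimal usage sketch (the class below is illustrative, not from the
# library): the decorator is a no-op unless accelerate >= 0.17.0 is
# installed, in which case calling `encode` first triggers any attached
# `_hf_hook.pre_forward`.
class _Demo:
    @apply_forward_hook
    def encode(self, x):
        return x + 1


assert _Demo().encode(41) == 42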
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
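
# A standalone sketch of the id arithmetic implemented above: sentencepiece
# ids are shifted by fairseq_offset (12) so ids 0-11 stay reserved for the
# special and [unused] tokens, and spm id 0 (its internal <unk>) maps to the
# tokenizer's unk id (3).
def _spm_to_fairseq_id(spm_id, offset=12, unk_id=3):
    return spm_id + offset if spm_id else unk_id


assert _spm_to_fairseq_id(3) == 15  # the first "real" piece lands at embedding position 15
assert _spm_to_fairseq_id(0) == 3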
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
    """simple docstring"""
    def __init__( self : Optional[Any] , parent : Tuple , batch_size : Dict=3 , image_size : Dict=32 , num_channels : int=3 , embeddings_size : Optional[int]=10 , hidden_sizes : Any=[8, 16, 32, 64] , depths : Optional[int]=[1, 1, 2, 1] , is_training : List[str]=True , use_labels : Tuple=True , hidden_act : Optional[Any]="relu" , num_labels : Optional[Any]=3 , scope : Union[str, Any]=None , out_features : Optional[int]=["stage2", "stage3", "stage4"] , out_indices : Optional[int]=[2, 3, 4] , num_groups : Any=1 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self : Union[str, Any] ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : int ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self : Union[str, Any] , config : Optional[Any] , pixel_values : int , labels : List[Any] ):
        '''simple docstring'''
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
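    # Shape sanity note (added): with the defaults above (batch_size=3,
    # hidden_sizes[-1]=64, image_size=32), the expected last_hidden_state shape
    # is (3, 64, 32 // 32, 32 // 32) == (3, 64, 1, 1).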
    def create_and_check_for_image_classification( self : int , config : List[str] , pixel_values : Optional[int] , labels : List[Any] ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self : int , config : Optional[Any] , pixel_values : Dict , labels : Optional[Any] ):
        '''simple docstring'''
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : int ):
        '''simple docstring'''
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Tuple ):
        '''simple docstring'''
        return None
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
    def test_forward_signature( self : Any ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self : List[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self : Any ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def test_hidden_states_output( self : Union[str, Any] ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict : Any , config : int , model_class : str ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
    def test_for_image_classification( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : str ):
        '''simple docstring'''
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> List[str]:
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : str ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self : int ):
        '''simple docstring'''
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self : List[Any] ):
        '''simple docstring'''
        self.model_tester = BitModelTester(self )
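    # Illustrative driver (added; hypothetical, outside the unittest harness):
    # tester = BitModelTester(unittest.TestCase())
    # config, pixel_values, labels = tester.prepare_config_and_inputs()
    # tester.create_and_check_model(config, pixel_values, labels)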
| 299 | 0 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    '''simple docstring'''
    def __init__( self : List[Any] , base_distribution : Distribution , loc : Any=None , scale : Optional[int]=None , event_dim : Optional[Any]=0 ) -> int:
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self : int ) -> Any:
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self : str ) -> List[str]:
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self : str ) -> List[str]:
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
    '''simple docstring'''
    def __init__( self : Optional[Any] , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs : Dict ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self : Dict , x : torch.Tensor ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer ( nn.Module ):
    '''simple docstring'''
    def __init__( self : Union[str, Any] , function : Tuple ) -> str:
        super().__init__()
        self.function = function
    def forward( self : str , x : str , *args : Union[str, Any] ) -> int:
        return self.function(x , *args )
class DistributionOutput :
    '''simple docstring'''
    distribution_class : type
    in_features : int
    args_dim : Dict[str, int]
    def __init__( self : List[Any] , dim : int = 1 ) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self : Optional[Any] , distr_args : Optional[Any] ) -> Union[str, Any]:
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self : List[Any] , distr_args : Optional[Any] , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self : Optional[Any] ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self : str ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support( self : Optional[Any] ) -> float:
        return 0.0
    def get_parameter_projection( self : Union[str, Any] , in_features : int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self : Dict , *args : torch.Tensor ) -> Any:
        raise NotImplementedError()
    @staticmethod
    def squareplus( x : torch.Tensor ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
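# Note on squareplus (added): (x + sqrt(x^2 + 4)) / 2 is a smooth, softplus-like
# map whose output is always strictly positive, e.g. squareplus(0) == 1.0; the
# subclasses below use it to keep df / scale / total_count in their valid ranges.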
class StudentTOutput ( DistributionOutput ):
    '''simple docstring'''
    args_dim : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class : type = StudentT
    @classmethod
    def domain_map( cls : Optional[Any] , df : torch.Tensor , loc : torch.Tensor , scale : torch.Tensor ) -> Tuple:
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput ( DistributionOutput ):
    '''simple docstring'''
    args_dim : Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class : type = Normal
    @classmethod
    def domain_map( cls : Optional[int] , loc : torch.Tensor , scale : torch.Tensor ) -> Union[str, Any]:
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput ( DistributionOutput ):
    '''simple docstring'''
    args_dim : Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class : type = NegativeBinomial
    @classmethod
    def domain_map( cls : str , total_count : torch.Tensor , logits : torch.Tensor ) -> Tuple:
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self : Dict , distr_args : int ) -> Distribution:
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self : List[str] , distr_args : Dict , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
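# Illustrative sketch (added; not part of the original file): wiring a
# StudentTOutput head onto raw features, using only the classes defined above.
# student_t = StudentTOutput(dim=1)
# proj = student_t.get_parameter_projection(in_features=16)
# distr_args = proj(torch.randn(8, 16))   # (df, loc, scale), each of shape (8,)
# distribution = student_t.distribution(distr_args)
# sample = distribution.sample()          # shape (8,)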
| 427 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self : Any ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute( self : List[Any] , predictions : Tuple , references : int , sample_weight : int=None ) -> Union[str, Any]:
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
}
| 427 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker( role_name : List[str] ) -> Optional[int]:
    """simple docstring"""
    iam_client = boto3.client('''iam''' )
    sagemaker_trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn( role_name : Union[str, Any] ) -> Union[str, Any]:
    """simple docstring"""
    iam_client = boto3.client('''iam''' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
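# Illustrative usage (added; the role name is a hypothetical example):
# _create_iam_role_for_sagemaker("accelerate_sagemaker_execution_role")
# role_arn = _get_iam_role_arn("accelerate_sagemaker_execution_role")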
def get_sagemaker_input( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase = None
if credentials_configuration == 0:
UpperCAmelCase = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
UpperCAmelCase = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
UpperCAmelCase = _ask_field('''AWS Access Key ID: ''' )
UpperCAmelCase = aws_access_key_id
UpperCAmelCase = _ask_field('''AWS Secret Access Key: ''' )
UpperCAmelCase = aws_secret_access_key
UpperCAmelCase = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
UpperCAmelCase = aws_region
UpperCAmelCase = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , SCREAMING_SNAKE_CASE_ , )
if role_management == 0:
UpperCAmelCase = _ask_field('''Enter your IAM role name: ''' )
else:
UpperCAmelCase = '''accelerate_sagemaker_execution_role'''
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
UpperCAmelCase = None
if is_custom_docker_image:
UpperCAmelCase = _ask_field('''Enter your Docker image: ''' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() )
UpperCAmelCase = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
UpperCAmelCase = None
if is_sagemaker_inputs_enabled:
UpperCAmelCase = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , )
UpperCAmelCase = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
UpperCAmelCase = None
if is_sagemaker_metrics_enabled:
UpperCAmelCase = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , )
UpperCAmelCase = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
UpperCAmelCase = {}
UpperCAmelCase = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
UpperCAmelCase = '''dynamo_'''
UpperCAmelCase = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
UpperCAmelCase = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
UpperCAmelCase = _ask_options(
'''Which mode do you want to use?''' , SCREAMING_SNAKE_CASE_ , lambda SCREAMING_SNAKE_CASE_ : TORCH_DYNAMO_MODES[int(SCREAMING_SNAKE_CASE_ )] , default='''default''' , )
UpperCAmelCase = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
UpperCAmelCase = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='''Please enter yes or no.''' , )
    ec2_instance_query = '''Which EC2 instance type you want to use for your training?'''
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda SCREAMING_SNAKE_CASE_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(SCREAMING_SNAKE_CASE_ )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , default='''ml.p3.2xlarge''' )
UpperCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
UpperCAmelCase = _ask_field(
'''How many machines do you want use? [1]: ''' , SCREAMING_SNAKE_CASE_ , default=1 , )
UpperCAmelCase = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=SCREAMING_SNAKE_CASE_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=SCREAMING_SNAKE_CASE_ , use_cpu=SCREAMING_SNAKE_CASE_ , dynamo_config=SCREAMING_SNAKE_CASE_ , eca_instance_type=SCREAMING_SNAKE_CASE_ , profile=SCREAMING_SNAKE_CASE_ , region=SCREAMING_SNAKE_CASE_ , iam_role_name=SCREAMING_SNAKE_CASE_ , mixed_precision=SCREAMING_SNAKE_CASE_ , num_machines=SCREAMING_SNAKE_CASE_ , sagemaker_inputs_file=SCREAMING_SNAKE_CASE_ , sagemaker_metrics_file=SCREAMING_SNAKE_CASE_ , )
| 570 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ) -> List[Any]:
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ) -> str:
    """simple docstring"""
    return compare_versions(torch_version , operation , version )
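# Illustrative usage (added): both helpers compare parsed versions, e.g.
# compare_versions("torch", ">=", "1.12") checks the installed torch package,
# and is_torch_version("<", "2.0") is shorthand for the same check on torch.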
| 570 | 1 |
import numpy as np
import qiskit
def bbaa( key_len = 8 , seed = None ):
    rng = np.random.default_rng(seed=seed )
    # On average, half of the qubits are measured in matching bases and
    # contribute to the key, so we prepare several times more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 ,size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 ,size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 ,size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits ,name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ ,sim ,shots=1 ,seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis ,bob_basis ,result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len ,"""0""" )
    return key
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 428 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self : Optional[Any] ):
        return 32
    @property
    def time_input_dim( self : Any ):
        return 32
    @property
    def block_out_channels_a( self : Optional[int] ):
        return self.time_input_dim
    @property
    def time_embed_dim( self : List[str] ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self : List[str] ):
        return 1_00
    @property
    def dummy_unet( self : List[str] ):
        torch.manual_seed(0 )
        config = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**config )
        return model
    @property
    def dummy_movq_kwargs( self : Union[str, Any] ):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self : Optional[int] ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self : int ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self : Tuple , device : List[str] , seed : Union[str, Any]=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_inpaint( self : List[Any] ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self : Optional[int] ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
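# Illustrative sketch (added; mirrors the integration test above, with the same
# model ids). The prior turns text into image embeddings; the inpaint decoder
# consumes them together with the image and mask.
# pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
# pipe = KandinskyV22InpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint")
# emb, zero_emb = pipe_prior("a hat").to_tuple()
# result = pipe(image=img, mask_image=mask, image_embeds=emb, negative_image_embeds=zero_emb)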
| 428 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig( PretrainedConfig ):
    model_type = """time_series_transformer"""
    attribute_map = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
    def __init__( self : List[Any] , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling : Optional[Union[str, bool]] = "mean" , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , num_time_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , encoder_ffn_dim : int = 3_2 , decoder_ffn_dim : int = 3_2 , encoder_layers : int = 2 , decoder_layers : int = 2 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , is_encoder_decoder : bool = True , activation_function : str = "gelu" , d_model : int = 6_4 , dropout : float = 0.1 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 1_0_0 , init_std : float = 0.02 , use_cache : Dict=True , **kwargs : str , ) -> str:
'''simple docstring'''
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self : Optional[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
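# Illustrative sketch (added): a minimal configuration; prediction_length is the
# only argument without a usable default, everything else falls back to the
# defaults in the signature above.
# config = TimeSeriesTransformerConfig(prediction_length=24)
# assert config.context_length == 24 and config.d_model == 64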
| 473 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Tuple = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""
    def __init__( self : Any , hidden_size : List[str]=7_6_8 , num_hidden_layers : Tuple=1_2 , num_attention_heads : int=1_2 , intermediate_size : List[str]=3_0_7_2 , hidden_act : Tuple="gelu" , hidden_dropout_prob : Optional[Any]=0.0 , attention_probs_dropout_prob : List[Any]=0.0 , initializer_range : Any=0.02 , layer_norm_eps : List[str]=1e-1_2 , image_size : Optional[int]=2_2_4 , patch_size : Optional[int]=1_6 , num_channels : str=3 , use_mask_token : Optional[Any]=False , use_absolute_position_embeddings : Any=False , use_relative_position_bias : Dict=False , use_shared_relative_position_bias : int=False , layer_scale_init_value : Tuple=0.1 , drop_path_rate : Any=0.1 , use_mean_pooling : Any=True , out_indices : List[str]=[3, 5, 7, 1_1] , pool_scales : int=[1, 2, 3, 6] , use_auxiliary_head : List[str]=True , auxiliary_loss_weight : Tuple=0.4 , auxiliary_channels : Optional[Any]=2_5_6 , auxiliary_num_convs : Tuple=1 , auxiliary_concat_input : Optional[int]=False , semantic_loss_ignore_index : List[Any]=2_5_5 , **kwargs : Any , ) -> List[str]:
'''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
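# Illustrative usage (added): the ONNX config above declares a single
# pixel_values input with dynamic batch/channel/height/width axes.
# onnx_config = Data2VecVisionOnnxConfig(Data2VecVisionConfig())
# assert "pixel_values" in onnx_config.inputs
# assert onnx_config.atol_for_validation == 1e-4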
| 473 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    def test_gelu_versions( self : Optional[Any] ):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self : Tuple ):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
    def test_get_activation( self : Tuple ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self : Union[str, Any] ):
        """simple docstring"""
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 119 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "dinat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self : int , patch_size : str=4 , num_channels : str=3 , embed_dim : int=64 , depths : int=[3, 4, 6, 5] , num_heads : Union[str, Any]=[2, 4, 8, 16] , kernel_size : Optional[int]=7 , dilations : str=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio : Any=3.0 , qkv_bias : Optional[Any]=True , hidden_dropout_prob : List[Any]=0.0 , attention_probs_dropout_prob : Optional[Any]=0.0 , drop_path_rate : int=0.1 , hidden_act : Union[str, Any]="gelu" , initializer_range : Any=0.0_2 , layer_norm_eps : int=1E-5 , layer_scale_init_value : Optional[int]=0.0 , out_features : Optional[int]=None , out_indices : str=None , **kwargs : Any , ):
"""simple docstring"""
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 119 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions( ) -> Optional[int]:
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases , key=lambda ver : version.Version(ver ) )
def init_hf_modules( ) -> Any:
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name ) -> str:
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file ) -> List[Any]:
    with open(module_file , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files( module_file ) -> Dict:
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F"{f}.py" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename ) -> Union[str, Any]:
    with open(filename , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F"{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`" )
    return get_relative_imports(filename )
def get_class_in_module( class_name , module_path ) -> Optional[int]:
    module_path = module_path.replace(os.path.sep , '.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ) -> Optional[Any]:
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    F" {loaded_module}." )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path , module_file , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , ) -> List[Any]:
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F"Defaulting to latest_version: {revision}." )
        elif revision in available_versions:
            revision = F"v{revision}"
        elif revision == "main":
            pass  # keep "main" as-is
        else:
            raise ValueError(
                F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                F" {', '.join(available_versions + ['main'] )}." )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
        except EnvironmentError:
            logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , F"{module_needed}.py" , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , **lowerCamelCase__ , ) -> int:
__lowerCamelCase : List[str] = get_cached_module_file(
lowerCamelCase__ , lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , resume_download=lowerCamelCase__ , proxies=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , local_files_only=lowerCamelCase__ , )
return get_class_in_module(lowerCamelCase__ , final_module.replace('.py' , '' ) )
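

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical call path for the helpers above: resolve a community pipeline file and
# pull its pipeline class out of the dynamic-module cache. The pipeline name and
# file name below are illustrative assumptions, not values taken from this file.
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion",      # assumed community pipeline name
#       "clip_guided_stable_diffusion.py",   # assumed module file
#       class_name=None,  # None lets find_pipeline_class() auto-detect the class
#   )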
| 337 |
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
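

# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Minimal driver for the pipeline above. The checkpoint id and file names are
# illustrative assumptions; any 256x256 unconditional DDPM/DDIM checkpoint that
# exposes `unet` and `scheduler` components should fit the constructor.
if __name__ == "__main__":
    import PIL.Image

    pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-church-256")  # assumed checkpoint
    init_image = PIL.Image.open("input.png")  # assumed local file
    images, noising_timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)
    images[0].save("reconstructed.png")
    print("noised to timestep:", noising_timestep)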
| 337 | 1 |
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
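

# --- Hedged illustration (added; not part of the original test file) ---
# The token_type_ids assertions above encode Funnel's convention: the <cls>
# token gets type id 2, first-segment tokens 0, second-segment tokens 1.
# A quick standalone check (assumes network access to the Hub):
#
#   from transformers import FunnelTokenizer
#   tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#   print(tok("hello", "world")["token_type_ids"])  # leading 2, then 0s, then 1s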
| 685 |
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 685 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
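

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical call site: pop a deprecated kwarg while warning the caller. The kwarg
# name and removal version below are illustrative assumptions.
#
#   def __call__(self, prompt, **kwargs):
#       steps = deprecate("steps", "1.0.0", "Use `num_inference_steps` instead.", take_from=kwargs)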
| 710 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
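

# --- Hedged illustration (added; not part of the original file) ---
# With the lazy structure above, importing the package stays cheap: the heavy
# torch-dependent module is only executed when one of its attributes is first
# accessed, e.g.
#
#   from transformers.models.audio_spectrogram_transformer import ASTConfig  # light import
#   # ASTModel is resolved (and modeling_* actually imported) only on first access.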
| 46 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations (recursive pop/append variant).

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations (in-place swap backtracking variant).

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
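
# --- Hedged cross-check (added for illustration; not part of the original module) ---
# Both implementations can be validated against the standard library:
if __name__ == "__main__":
    from itertools import permutations

    expected = sorted(list(p) for p in permutations([1, 2, 3]))
    assert sorted(permute([1, 2, 3])) == expected
    assert sorted(permute2([1, 2, 3])) == expected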
| 136 |
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
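

# --- Hedged usage sketch (added for illustration; not part of the original test file) ---
# The sld_* knobs exercised above correspond to a user-facing call roughly like:
#
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
#   out = pipe(
#       prompt,
#       sld_guidance_scale=2000,
#       sld_warmup_steps=7,
#       sld_threshold=0.025,
#       sld_momentum_scale=0.5,
#       sld_mom_beta=0.7,
#   )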
| 136 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
UpperCamelCase = ['names', 'prefix']
UpperCamelCase = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
UpperCamelCase = ['encoding_errors', 'on_bad_lines']
UpperCamelCase = ['date_format']
@dataclass
class lowercase_ (datasets.BuilderConfig ):
A__ : str = ","
A__ : Optional[str] = None
A__ : Optional[Union[int, List[int], str]] = "infer"
A__ : Optional[List[str]] = None
A__ : Optional[List[str]] = None
A__ : Optional[Union[int, str, List[int], List[str]]] = None
A__ : Optional[Union[List[int], List[str]]] = None
A__ : Optional[str] = None
A__ : bool = True
A__ : Optional[Literal["c", "python", "pyarrow"]] = None
A__ : Dict[Union[int, str], Callable[[Any], Any]] = None
A__ : Optional[list] = None
A__ : Optional[list] = None
A__ : bool = False
A__ : Optional[Union[int, List[int]]] = None
A__ : Optional[int] = None
A__ : Optional[Union[str, List[str]]] = None
A__ : bool = True
A__ : bool = True
A__ : bool = False
A__ : bool = True
A__ : Optional[str] = None
A__ : str = "."
A__ : Optional[str] = None
A__ : str = '"'
A__ : int = 0
A__ : Optional[str] = None
A__ : Optional[str] = None
A__ : Optional[str] = None
A__ : Optional[str] = None
A__ : bool = True
A__ : bool = True
A__ : int = 0
A__ : bool = True
A__ : bool = False
A__ : Optional[str] = None
A__ : int = 10000
A__ : Optional[datasets.Features] = None
A__ : Optional[str] = "strict"
A__ : Literal["error", "warn", "skip"] = "error"
A__ : Optional[str] = None
def lowerCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
if self.delimiter is not None:
_a = self.delimiter
if self.column_names is not None:
_a = self.column_names
@property
def lowerCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
_a = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , A__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowercase_ (datasets.ArrowBasedBuilder ):
A__ : Tuple = CsvConfig
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self , a_ ) ->List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
_a = data_files
if isinstance(A__ , A__ ):
_a = [files]
_a = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_a = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
_a = [files]
_a = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase__ ( self , a_ ) ->Optional[int]:
'''simple docstring'''
if self.config.features is not None:
_a = self.config.features.arrow_schema
if all(not require_storage_cast(A__ ) for feature in self.config.features.values() ):
# cheaper cast
_a = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=A__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_a = table_cast(A__ , A__ )
return pa_table
def lowerCamelCase__ ( self , a_ ) ->Optional[int]:
'''simple docstring'''
_a = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_a = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(A__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
_a = pd.read_csv(A__ , iterator=A__ , dtype=A__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(A__ ):
_a = pa.Table.from_pandas(A__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(A__ )}: {e}''' )
raise
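

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# End users reach this builder through `load_dataset`; the file name below is an
# illustrative assumption.
if __name__ == "__main__":
    import datasets

    ds = datasets.load_dataset("csv", data_files={"train": "train.csv"})  # assumed local file
    print(ds["train"].features)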
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 612 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[Any] = (DEISMultistepScheduler,)
_UpperCAmelCase :Optional[Any] = (("num_inference_steps", 25),)
def UpperCAmelCase__ ( self : Tuple , **snake_case__ : Optional[int] ):
lowerCamelCase_ : Dict ={
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case__ )
return config
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Tuple=0 , **snake_case__ : Optional[Any] ):
lowerCamelCase_ : str =dict(self.forward_default_kwargs )
lowerCamelCase_ : int =kwargs.pop("num_inference_steps" , snake_case__ )
lowerCamelCase_ : int =self.dummy_sample
lowerCamelCase_ : Union[str, Any] =0.1 * sample
lowerCamelCase_ : Optional[Any] =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase_ : Optional[int] =self.get_scheduler_config(**snake_case__ )
lowerCamelCase_ : List[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCamelCase_ : str =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCamelCase_ : List[Any] =scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCamelCase_ : Any =dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase_ , lowerCamelCase_ : Tuple =sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase_ : Any =scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCamelCase_ : str =new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Optional[int]=0 , **snake_case__ : str ):
lowerCamelCase_ : Union[str, Any] =dict(self.forward_default_kwargs )
lowerCamelCase_ : Optional[int] =kwargs.pop("num_inference_steps" , snake_case__ )
lowerCamelCase_ : Any =self.dummy_sample
lowerCamelCase_ : Optional[int] =0.1 * sample
lowerCamelCase_ : Optional[int] =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase_ : List[Any] =self.get_scheduler_config()
lowerCamelCase_ : Dict =scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase_ : Optional[int] =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCamelCase_ : int =scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase_ : Any =dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase_ : List[Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCamelCase_ : List[Any] =new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Tuple=None , **snake_case__ : str ):
if scheduler is None:
lowerCamelCase_ : int =self.scheduler_classes[0]
lowerCamelCase_ : Optional[int] =self.get_scheduler_config(**snake_case__ )
lowerCamelCase_ : List[str] =scheduler_class(**snake_case__ )
lowerCamelCase_ : Any =self.scheduler_classes[0]
lowerCamelCase_ : List[Any] =self.get_scheduler_config(**snake_case__ )
lowerCamelCase_ : Union[str, Any] =scheduler_class(**snake_case__ )
lowerCamelCase_ : List[str] =10
lowerCamelCase_ : int =self.dummy_model()
lowerCamelCase_ : Optional[Any] =self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : str =model(snake_case__ , snake_case__ )
lowerCamelCase_ : int =scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : List[Any] =dict(self.forward_default_kwargs )
lowerCamelCase_ : List[str] =kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase_ : Optional[int] =self.get_scheduler_config()
lowerCamelCase_ : str =scheduler_class(**snake_case__ )
lowerCamelCase_ : Optional[int] =self.dummy_sample
lowerCamelCase_ : int =0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowerCamelCase_ : Optional[int] =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase_ : Union[str, Any] =[residual + 0.2, residual + 0.15, residual + 0.10]
lowerCamelCase_ : Any =dummy_past_residuals[: scheduler.config.solver_order]
lowerCamelCase_ : List[Any] =scheduler.timesteps[5]
lowerCamelCase_ : str =scheduler.timesteps[6]
lowerCamelCase_ : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCamelCase_ : Any =scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
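
# Hedged usage sketch (not part of the test suite): the calls below —
# DEISMultistepScheduler(...), set_timesteps(), step(...).prev_sample — are the
# same public diffusers scheduler API exercised by the tests above; the random
# tensors are stand-ins for a real latent and a real UNet prediction.
if __name__ == "__main__":
    demo_scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)  # stand-in for a latent tensor
    for t in demo_scheduler.timesteps:
        model_output = torch.randn_like(demo_sample)  # stand-in for a UNet output
        demo_sample = demo_scheduler.step(model_output, t, demo_sample).prev_sample
    print(demo_sample.shape)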
| 153 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 153 | 1 |
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            # compare mirrored pairs and swap out-of-order elements
            if collection[left] > collection[right]:
                collection[left], collection[right] = collection[right], collection[left]
                swapped = True
            left += 1
            right -= 1
        # odd-length segment: compare the middle element with its right neighbor
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = collection[right + 1], collection[left]
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
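    # Illustrative self-check (hedged addition, not in the original script):
    # circle_sort sorts in place, so the list now agrees with sorted().
    assert unsorted == sorted(unsorted)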
| 710 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
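

# Illustrative usage (hedged addition): round-trip the config through its dict
# form, which PretrainedConfig subclasses support via to_dict()/from_dict().
if __name__ == "__main__":
    config = Data2VecTextConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    restored = Data2VecTextConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 128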
| 206 | 0 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode raw bytes into an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
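    # Illustrative round trip (hedged addition): b"Hello" encodes to
    # "48656C6C6F" and decodes back to the original bytes.
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"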
| 3 |
def sylvester(number: int) -> int:
    """Return the nth term of Sylvester's sequence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
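

if __name__ == "__main__":
    # Illustrative check (hedged addition): the first terms are 2, 3, 7, 43,
    # since each term equals n^2 - n + 1 for its predecessor n.
    assert [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]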
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """
    Integrate an ODE y' = f(x, y) with Heun's method (the explicit trapezoidal
    rule): predict with an Euler step, then correct with the averaged slope.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (plain Euler)
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector step: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
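    # Illustrative accuracy check (hedged addition): integrating y' = y from
    # x = 0 to 1 with y(0) = 1 should land near e for this second-order method.
    assert abs(heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1] - np.e) < 1e-3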
| 718 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
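

# Illustrative behavior sketch (hedged addition): this placeholder is what the
# import machinery falls back to when torch/torchsde are missing, so any use of
# the class raises an ImportError via requires_backends instead of failing late.
if __name__ == "__main__":
    try:
        DPMSolverSDEScheduler()
    except ImportError as err:
        print(f"expected when the backends are absent: {err}")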
| 612 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
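

# Hedged usage sketch: from_pretrained and __call__ are the standard public
# tokenizer API; the checkpoint below requires network access to the Hub.
if __name__ == "__main__":
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    encoded = tokenizer("Hello, BLOOM!")
    print(encoded["input_ids"])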
| 346 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
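    # Illustrative check (hedged addition), matching the Project Euler 25
    # example: the first Fibonacci term with 3 digits is F(12) = 144.
    assert solution(3) == 12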
| 346 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8_065, hidden_size=1_536, num_hidden_layers=36, intermediate_size=6_144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""")
| 142 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 142 | 1 |
"""simple docstring"""
from manim import *
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[str] = Rectangle(height=0.5 , width=0.5 )
snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case : Tuple = [mem.copy() for i in range(6 )]
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Tuple = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = Text("CPU" , font_size=24 )
snake_case : Optional[int] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
snake_case : Optional[Any] = [mem.copy() for i in range(1 )]
snake_case : List[str] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[int] = Text("GPU" , font_size=24 )
snake_case : Tuple = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase__ )
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Any = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Dict = Text("Model" , font_size=24 )
snake_case : Optional[Any] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
snake_case : Optional[Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
snake_case : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case : Any = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.add(UpperCamelCase__ )
snake_case : Optional[Any] = []
snake_case : Dict = []
snake_case : Union[str, Any] = []
for i, rect in enumerate(UpperCamelCase__ ):
snake_case : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase__ )
cpu_target.generate_target()
snake_case : Optional[int] = 0.46 / 4
snake_case : Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
cpu_targs.append(UpperCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 178 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that, by default, renders the progress bar only
    on the main process of a distributed launch.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
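

# Illustrative usage (hedged addition): wraps an iterable exactly like
# tqdm.tqdm, but in a multi-process launch only the main process draws the bar.
if __name__ == "__main__":
    for _ in tqdm(True, range(10_000)):
        pass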
| 178 | 1 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product over all contiguous subarrays of `numbers`,
    tracking both a running maximum and minimum, since a negative value can
    flip the minimum into the new maximum.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
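

# Illustrative checks (hedged addition) on two classic inputs: the best
# contiguous product of [2, 3, -2, 4] is 2 * 3 = 6, and a zero resets the run.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0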
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 681 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
snake_case = """UperNetConfig"""
class UperNetConvModule(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[int, Tuple[int, int]] , UpperCAmelCase_ : Union[int, Tuple[int, int], str] = 0 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
SCREAMING_SNAKE_CASE : str = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , bias=UpperCAmelCase_ , dilation=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.BatchNormad(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = nn.ReLU()
def _A ( self : Union[str, Any] , UpperCAmelCase_ : torch.Tensor ):
SCREAMING_SNAKE_CASE : Any = self.conv(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.batch_norm(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.activation(UpperCAmelCase_ )
return output
class UperNetPyramidPoolingBlock(nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
super().__init__()
SCREAMING_SNAKE_CASE : str = [
nn.AdaptiveAvgPoolad(UpperCAmelCase_ ),
UperNetConvModule(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = input
for layer in self.layers:
SCREAMING_SNAKE_CASE : Optional[Any] = layer(UpperCAmelCase_ )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple[int, ...] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool ):
super().__init__()
SCREAMING_SNAKE_CASE : Dict = pool_scales
SCREAMING_SNAKE_CASE : Optional[int] = align_corners
SCREAMING_SNAKE_CASE : Union[str, Any] = in_channels
SCREAMING_SNAKE_CASE : List[str] = channels
SCREAMING_SNAKE_CASE : str = []
for i, pool_scale in enumerate(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , channels=UpperCAmelCase_ )
self.blocks.append(UpperCAmelCase_ )
self.add_module(str(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _A ( self : Tuple , UpperCAmelCase_ : torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE : Dict = ppm(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = nn.functional.interpolate(
UpperCAmelCase_ , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(UpperCAmelCase_ )
return ppm_outs
class UperNetHead(nn.Module):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = config
SCREAMING_SNAKE_CASE : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE : Dict = in_channels
SCREAMING_SNAKE_CASE : str = config.hidden_size
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE : Tuple = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList()
SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetConvModule(UpperCAmelCase_ , self.channels , kernel_size=1 )
SCREAMING_SNAKE_CASE : Dict = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(UpperCAmelCase_ )
self.fpn_convs.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _A ( self : Optional[int] ):
self.apply(self._init_weights )
def _A ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ):
if isinstance(UpperCAmelCase_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _A ( self : Tuple , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Any = inputs[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = [x]
psp_outs.extend(self.psp_modules(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(UpperCAmelCase_ , dim=1 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bottleneck(UpperCAmelCase_ )
return output
def _A ( self : Dict , UpperCAmelCase_ : torch.Tensor ):
# build laterals
SCREAMING_SNAKE_CASE : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(UpperCAmelCase_ ) )
# build top-down path
SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE : List[str] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=UpperCAmelCase_ , mode="bilinear" , align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE : Any = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(UpperCAmelCase_ , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.fpn_bottleneck(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.classifier(UpperCAmelCase_ )
return output
class UperNetFCNHead(nn.Module):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE : int = config
SCREAMING_SNAKE_CASE : Optional[Any] = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE : List[str] = config.auxiliary_channels
SCREAMING_SNAKE_CASE : List[Any] = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE : Tuple = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE : Optional[int] = in_index
SCREAMING_SNAKE_CASE : List[Any] = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE : int = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , dilation=UpperCAmelCase_ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , dilation=UpperCAmelCase_ ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE : Optional[int] = nn.Identity()
else:
SCREAMING_SNAKE_CASE : List[str] = nn.Sequential(*UpperCAmelCase_ )
if self.concat_input:
SCREAMING_SNAKE_CASE : Dict = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=UpperCAmelCase_ , padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _A ( self : Optional[int] ):
self.apply(self._init_weights )
def _A ( self : int , UpperCAmelCase_ : Union[str, Any] ):
if isinstance(UpperCAmelCase_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _A ( self : Tuple , UpperCAmelCase_ : torch.Tensor ):
# just take the relevant feature maps
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE : Any = self.convs(UpperCAmelCase_ )
if self.concat_input:
SCREAMING_SNAKE_CASE : str = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
SCREAMING_SNAKE_CASE : int = self.classifier(UpperCAmelCase_ )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def _A ( self : Tuple , UpperCAmelCase_ : Tuple ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _A ( self : Tuple ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _A ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=False ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = value
snake_case = r"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : Any ):
super().__init__(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetHead(UpperCAmelCase_ , in_channels=self.backbone.channels )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetFCNHead(UpperCAmelCase_ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE : Tuple = self.backbone.forward_with_filtered_kwargs(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , output_attentions=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = outputs.feature_maps
SCREAMING_SNAKE_CASE : Optional[Any] = self.decode_head(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = nn.functional.interpolate(UpperCAmelCase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE : Dict = self.auxiliary_head(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = nn.functional.interpolate(
UpperCAmelCase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE : Dict = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
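

# Hedged inference sketch (added, not part of the modeling file): in the
# transformers library this model is exposed as UperNetForSemanticSegmentation;
# the checkpoint and image URL below require network access.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch_size, num_labels, height, width)
    print(logits.shape)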
| 62 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : Any="[PAD]" , UpperCAmelCase_ : List[str]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : List[str] , ):
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , UpperCAmelCase_ ) != do_lower_case
or pre_tok_state.get("strip_accents" , UpperCAmelCase_ ) != strip_accents
):
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(UpperCAmelCase_ , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE : Any = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : Tuple = pre_tok_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = do_lower_case
def __getstate__( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = d
SCREAMING_SNAKE_CASE : Dict = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE : Any = PreTokenizer.custom(JiebaPreTokenizer(UpperCAmelCase_ ) )
def _A ( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _A ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
SCREAMING_SNAKE_CASE : Optional[int] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _A ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=False , **UpperCAmelCase_ : str , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
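
# Why __getstate__/__setstate__ above swap pre-tokenizers: a tokenizers
# PreTokenizer.custom(...) wrapper around a Python object (here
# JiebaPreTokenizer) cannot be pickled, so it is replaced by a plain
# BertPreTokenizer for serialization and rebuilt from the vocab on load.
# A hedged sketch (in the upstream library this class is RoFormerTokenizerFast
# and the checkpoint is one of those listed above):
#
#   import pickle
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   data = pickle.dumps(tok)   # works only because __getstate__ drops the custom pre-tokenizer
#   tok2 = pickle.loads(data)  # __setstate__ re-attaches JiebaPreTokenizer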
| 62 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class snake_case ( __lowercase ):
def __init__(self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="This is a sound of {}." ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
SCREAMING_SNAKE_CASE_ = requests.get(SCREAMING_SNAKE_CASE_ ).content
else:
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = ffmpeg_read(SCREAMING_SNAKE_CASE_ , self.feature_extractor.sampling_rate )
if not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
SCREAMING_SNAKE_CASE_ = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = candidate_labels
SCREAMING_SNAKE_CASE_ = [hypothesis_template.format(SCREAMING_SNAKE_CASE_ ) for x in candidate_labels]
SCREAMING_SNAKE_CASE_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [text_inputs]
return inputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = model_inputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE_ = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE_ = text_inputs[0][0]
SCREAMING_SNAKE_CASE_ = self.model(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = model_outputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE_ = model_outputs['''logits'''][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ = logits.softmax(dim=0 )
SCREAMING_SNAKE_CASE_ = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
SCREAMING_SNAKE_CASE_ = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , key=lambda x : -x[0] )
]
        return result
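
# Hedged usage sketch for the zero-shot audio classification pipeline above.
# The checkpoint name is an assumption (a CLAP-style model is the usual
# backbone for this task); the file name and scores are illustrative.
from transformers import pipeline

audio_classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
predictions = audio_classifier(
    "dog_bark.wav",  # local path, URL, or 1-D numpy array at the feature extractor's sampling rate
    candidate_labels=["dog barking", "vacuum cleaner"],
    hypothesis_template="This is a sound of {}.",
)
# e.g. [{"score": 0.99, "label": "dog barking"}, {"score": 0.01, "label": "vacuum cleaner"}]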
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
 | 628 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__lowerCamelCase : List[str] = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class lowerCAmelCase__ ( _lowercase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = GPTaTokenizer
def __init__( self : int , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Any="<|endoftext|>" , UpperCamelCase_ : Union[str, Any]="<|endoftext|>" , UpperCamelCase_ : Optional[Any]="<|endoftext|>" , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCamelCase_ : List[str] = kwargs.pop('''add_bos_token''' , UpperCamelCase_ )
lowerCamelCase_ : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCamelCase_ : Tuple = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ : Union[str, Any] = add_prefix_space
lowerCamelCase_ : Any = pre_tok_class(**UpperCamelCase_ )
lowerCamelCase_ : int = add_prefix_space
def __UpperCamelCase ( self : Optional[int] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Any , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] = None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Dict = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def __UpperCamelCase ( self : int , UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
lowerCamelCase_ : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
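
# Hedged sketch of the add_prefix_space behavior wired up in __init__ above,
# using the standard "gpt2" checkpoint:
from transformers import GPT2TokenizerFast

gpt2_tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# With add_prefix_space=True the first word is tokenized as if preceded by a
# space, which is also what makes pretokenized input legal here:
ids = gpt2_tok(["Hello", "world"], is_split_into_words=True)["input_ids"]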
| 501 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase_ ( _lowercase ) -> str:
'''simple docstring'''
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
for char in word:
lowerCamelCase_ : List[str] = ord(_lowercase )
if not _is_chinese_char(_lowercase ):
return 0
return 1
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = set()
for token in tokens:
lowerCamelCase_ : Optional[int] = len(_lowercase ) > 1 and is_chinese(_lowercase )
if chinese_word:
word_set.add(_lowercase )
lowerCamelCase_ : List[str] = list(_lowercase )
return word_list
def lowercase_ ( _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowerCamelCase_ : List[Any] = max([len(_lowercase ) for w in chinese_word_set] )
lowerCamelCase_ : int = bert_tokens
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = 0, len(_lowercase )
while start < end:
lowerCamelCase_ : int = True
if is_chinese(bert_word[start] ):
lowerCamelCase_ : Dict = min(end - start , _lowercase )
for i in range(_lowercase , 1 , -1 ):
lowerCamelCase_ : str = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase_ : Union[str, Any] = '''##''' + bert_word[j]
lowerCamelCase_ : Union[str, Any] = start + i
lowerCamelCase_ : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
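# Illustrative walk-through of the marking above (tokens are made up):
#   bert tokens   : ["中", "国", "人"]   with chinese_word_set = {"中国"}
#   after marking : ["中", "##国", "人"]
# i.e. the non-initial characters of a whole segmented word receive the "##"
# prefix so the caller can later record their positions for whole-word masking.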
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = []
for i in range(0 , len(_lowercase ) , 100 ):
lowerCamelCase_ : Any = ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCamelCase_ : Union[str, Any] = [get_chinese_word(_lowercase ) for r in res]
ltp_res.extend(_lowercase )
assert len(_lowercase ) == len(_lowercase )
lowerCamelCase_ : Optional[Any] = []
for i in range(0 , len(_lowercase ) , 100 ):
lowerCamelCase_ : Tuple = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowercase , truncation=_lowercase , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowercase ) == len(_lowercase )
lowerCamelCase_ : str = []
for input_ids, chinese_word in zip(_lowercase , _lowercase ):
lowerCamelCase_ : Tuple = []
for id in input_ids:
lowerCamelCase_ : Tuple = bert_tokenizer._convert_id_to_token(_lowercase )
input_tokens.append(_lowercase )
lowerCamelCase_ : List[Any] = add_sub_symbol(_lowercase , _lowercase )
lowerCamelCase_ : Optional[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowercase ):
if token[:2] == "##":
lowerCamelCase_ : Optional[int] = token[2:]
# save chinese tokens' pos
if len(_lowercase ) == 1 and _is_chinese_char(ord(_lowercase ) ):
ref_id.append(_lowercase )
ref_ids.append(_lowercase )
assert len(_lowercase ) == len(_lowercase )
return ref_ids
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ : List[Any] = f.readlines()
lowerCamelCase_ : List[Any] = [line.strip() for line in data if len(_lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase_ : Optional[int] = LTP(args.ltp ) # faster in GPU device
lowerCamelCase_ : Tuple = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase_ : int = prepare_ref(_lowercase , _lowercase , _lowercase )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ : Optional[Any] = [json.dumps(_lowercase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowercase )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__lowercase : Union[str, Any] = parser.parse_args()
main(args)
| 422 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , A_ : Optional[int] , A_ : Union[str, Any]=13 , A_ : Optional[Any]=30 , A_ : Dict=2 , A_ : Tuple=3 , A_ : Tuple=True , A_ : Tuple=True , A_ : str=32 , A_ : List[Any]=5 , A_ : List[str]=4 , A_ : Dict=37 , A_ : List[Any]="gelu" , A_ : Union[str, Any]=0.1 , A_ : Optional[Any]=0.1 , A_ : Optional[int]=10 , A_ : List[str]=0.02 , ) -> Tuple:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
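        # e.g. with the defaults image_size=30 and patch_size=2 this gives
        # (30 // 2) ** 2 = 225 patches and a sequence length of 226.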
def lowercase ( self : int ) -> List[str]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase ( self : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[int] ) -> str:
__snake_case = FlaxViTModel(config=A_ )
__snake_case = model(A_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (self.image_size, self.image_size)
__snake_case = (self.patch_size, self.patch_size)
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase ( self : List[Any] , A_ : List[str] , A_ : Tuple ) -> Optional[int]:
__snake_case = self.type_sequence_label_size
__snake_case = FlaxViTForImageClassification(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = FlaxViTForImageClassification(A_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(A_ )
def lowercase ( self : Any ) -> str:
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase ( self : int ) -> None:
__snake_case = FlaxViTModelTester(self )
__snake_case = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase ( self : str ) -> Any:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase ( self : str ) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def lowercase ( self : List[Any] ) -> List[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(A_ )
__snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(A_ , A_ )
__snake_case = model_class(A_ )
@jax.jit
def model_jitted(A_ : str , **A_ : int ):
return model(pixel_values=A_ , **A_ )
with self.subTest('''JIT Enabled''' ):
__snake_case = model_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__snake_case = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase ( self : int ) -> Dict:
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
 | 708 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase : int = logging.get_logger(__name__)
__lowercase : Tuple = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowercase : List[Any] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowercase : List[str] = {"facebook/blenderbot-3B": 128}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : str = BlenderbotTokenizer
def __init__( self : Union[str, Any] , A_ : Any=None , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Union[str, Any]="replace" , A_ : Union[str, Any]="<s>" , A_ : Union[str, Any]="</s>" , A_ : Optional[int]="</s>" , A_ : List[Any]="<s>" , A_ : Union[str, Any]="<unk>" , A_ : Any="<pad>" , A_ : List[str]="<mask>" , A_ : int=False , A_ : Tuple=True , **A_ : Optional[Any] , ) -> int:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = getattr(A_ , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**A_ )
__snake_case = add_prefix_space
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , A_ ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(A_ , state.pop('''type''' ) )
__snake_case = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : int , A_ : Union[str, Any] ) -> List[Any]:
__snake_case = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__snake_case = value
def lowercase ( self : Any , *A_ : List[str] , **A_ : Optional[int] ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def lowercase ( self : str , *A_ : Tuple , **A_ : str ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
def lowercase ( self : str , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
__snake_case = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def lowercase ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]:
__snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
__snake_case = ''' '''.join(A_ )
__snake_case = self.encode(A_ )
if len(A_ ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
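
# Hedged sketch of the conversation encoding implemented above. The
# Conversation API lived in transformers.pipelines.conversational and has been
# removed in newer releases, so this is illustrative only:
#
#   from transformers import BlenderbotTokenizerFast
#   from transformers.pipelines.conversational import Conversation
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   conv = Conversation("Hello, how are you?")
#   ids = tok._build_conversation_input_ids(conv)  # user turns get a leading space; encode appends eos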
 | 93 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Dict , _snake_case : Union[str, Any]=13 , _snake_case : Tuple=64 , _snake_case : Any=2 , _snake_case : Optional[Any]=3 , _snake_case : Any=True , _snake_case : Dict=True , _snake_case : Optional[int]=32 , _snake_case : List[str]=5 , _snake_case : Union[str, Any]=4 , _snake_case : str=37 , _snake_case : str="gelu" , _snake_case : Dict=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Union[str, Any]=10 , _snake_case : Optional[int]=0.02 , _snake_case : Optional[Any]=[1, 16, 4, 4] , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
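        # e.g. with the default image_size=64 and an output stride of 32 the
        # backbone feature map is 2x2, i.e. (64 // 32) ** 2 = 4 patches and a
        # sequence length of 5.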
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Any ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int] , _snake_case : int ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : int = False
A__ : Dict = False
A__ : Tuple = False
def _a ( self : List[str] ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : Optional[int] ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : Any ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Optional[Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : int ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : str ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 9 |
'''simple docstring'''
def __a(numa : int , numb : int ):
    '''simple docstring'''
    return numa ^ numb < 0
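# The xor of two ints is negative exactly when their sign bits differ, so the
# function above returns True only for operands of opposite sign:
#   __a(5, -3)  -> True
#   __a(5, 3)   -> False
#   __a(-5, -3) -> False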
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
from __future__ import annotations
import math
def UpperCamelCase__ ( A__ , A__ ) -> float:
snake_case__ : Dict = u
for i in range(1 , A__ ):
snake_case__ : List[str] = temp * (u - i)
return temp
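# ucal above builds the rising product u(u-1)(u-2)...(u-i+1) that appears in
# Newton's forward-difference interpolation formula
#   f(x) ~= y0 + u*dy0 + u(u-1)/2! * d^2 y0 + u(u-1)(u-2)/3! * d^3 y0 + ...
# with u = (x - x0) / h; main() below divides by math.factorial(i) accordingly.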
def UpperCamelCase__ ( ) -> None:
snake_case__ : List[str] = int(input('enter the numbers of values: ' ) )
snake_case__ : list[list[float]] = []
for _ in range(A__ ):
y.append([] )
for i in range(A__ ):
for j in range(A__ ):
y[i].append(A__ )
snake_case__ : List[Any] = 0
print('enter the values of parameters in a list: ' )
snake_case__ : str = list(map(A__ , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(A__ ):
snake_case__ : Tuple = float(input() )
snake_case__ : Tuple = int(input('enter the value to interpolate: ' ) )
snake_case__ : Union[str, Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , A__ ):
for j in range(n - i ):
snake_case__ : Any = y[j + 1][i - 1] - y[j][i - 1]
snake_case__ : int = y[0][0]
for i in range(1 , A__ ):
summ += (ucal(A__ , A__ ) * y[0][i]) / math.factorial(A__ )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
 | 709 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowerCAmelCase__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : List[str] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Tuple = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ : int = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
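
# Hedged end-to-end sketch for the reader tokenizer documented above
# (checkpoint names are the ones listed in this file; in the upstream library
# the span-decoding entry point is named `decode_best_spans`):
from transformers import DPRReader, DPRReaderTokenizerFast

reader_tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
reader_enc = reader_tok(
    questions=["What is love ?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
reader_model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
reader_out = reader_model(**reader_enc)
best_spans = reader_tok.decode_best_spans(reader_enc, reader_out)  # list of DPRSpanPrediction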
@add_start_docstrings(_lowerCamelCase )
class __snake_case :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
| 699 | 0 |
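# A minimal, self-contained sketch of the greedy span-selection logic used by
# `_get_best_spans` above: score every (start, end) pair within
# `max_answer_length`, sort by score, and keep spans that do not contain (and
# are not contained by) an earlier pick. All names here are illustrative.
def get_best_spans_sketch(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for (prev_start, prev_end) in chosen
        ):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen

# Example: the highest-scoring admissible span for these toy logits is (1, 2).
assert get_best_spans_sketch([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=2, top_spans=1) == [(1, 2)]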
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_1_2,
"t5-base": 5_1_2,
"t5-large": 5_1_2,
"t5-3b": 5_1_2,
"t5-11b": 5_1_2,
}
class snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> Dict:
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda token : bool('''extra_id_''' in str(token ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_ta_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ) -> Dict:
        """simple docstring"""
        if pretrained_model_name_or_path in snake_case.max_model_input_sizes:
            deprecated_max_model_length = snake_case.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ) -> Dict:
        """simple docstring"""
        return list(
            set(filter(lambda token : bool(re.search(R'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ) -> str:
        """simple docstring"""
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 261 |
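# A short illustration of the `<extra_id_*>` sentinel convention used by the
# tokenizer above: with `extra_ids=N`, N sentinel tokens are appended, and the
# regex filter recovers exactly those tokens from a mixed list. This is a
# standalone sketch with toy values.
import re
extra_ids = 3
sentinels = [f"<extra_id_{i}>" for i in range(extra_ids)]
assert sentinels == ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>"]
mixed = ["<pad>", "<extra_id_0>", "</s>", "<extra_id_2>"]
recovered = [t for t in mixed if re.search(r"<extra_id_\d+>", t)]
assert recovered == ["<extra_id_0>", "<extra_id_2>"]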
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ):
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ):
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ):
    if number >= 0: # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261 | 1 |
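# Quick cross-check of the string-based shifts above against Python's native
# operators; this assumes the three helpers are importable under the
# descriptive names used in that block.
number, shift = 13, 2
assert int(logical_left_shift(number, shift), 2) == number << shift
assert int(logical_right_shift(number, shift), 2) == number >> shift
# For negatives the arithmetic right shift mirrors two's complement:
# -8 is 11000 in 5 bits, and shifting right by 2 sign-extends to 11110 (-2).
assert arithmetic_right_shift(-8, 2) == "0b11110"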
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( ProcessorMixin ):
"""simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,FutureWarning ,)
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor ,tokenizer )
    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 700 |
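# The `model_input_names` property above de-duplicates while preserving order
# via `dict.fromkeys`; a standalone illustration with toy values that are not
# from the original:
tokenizer_input_names = ["input_ids", "attention_mask"]
image_processor_input_names = ["pixel_values", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
assert merged == ["input_ids", "attention_mask", "pixel_values"]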
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Optional[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class lowerCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self ,vocab_size=5_02_67 ,max_position_embeddings=10_24 ,encoder_layers=12 ,encoder_ffn_dim=40_96 ,encoder_attention_heads=16 ,decoder_layers=12 ,decoder_ffn_dim=40_96 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,activation_function="gelu" ,d_model=10_24 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,classifier_dropout=0.0 ,scale_embedding=False ,use_cache=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,is_encoder_decoder=True ,decoder_start_token_id=2 ,forced_eos_token_id=2 ,use_prompt=False ,prompt_length=1_00 ,prompt_mid_dim=8_00 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" ,False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                "The config can simply be saved and uploaded again to be fixed." )
| 394 | 0 |
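# A hedged, standalone sketch of the backward-compatibility pattern at the end
# of the config above: honor the legacy `force_bos_token_to_be_generated` kwarg
# once, warn, and derive the new `forced_bos_token_id`. The helper name is
# illustrative, not part of the original.
import warnings
def resolve_forced_bos_token_id(kwargs, forced_bos_token_id, bos_token_id):
    if forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
        warnings.warn("`force_bos_token_to_be_generated` is deprecated; set `forced_bos_token_id` instead.")
        return bos_token_id
    return forced_bos_token_id
assert resolve_forced_bos_token_id({"force_bos_token_to_be_generated": True}, None, 0) == 0
assert resolve_forced_bos_token_id({}, 2, 0) == 2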
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger =logging.get_logger(__name__)
VOCAB_FILES_NAMES ={'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP ={
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE ='''▁'''
class snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 399 |
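# A self-contained sketch of the `preprocess_text` normalization above:
# collapse whitespace, unify quotes, optionally strip accents via NFKD, and
# lowercase. The function name is illustrative; behavior mirrors the method.
import unicodedata
def preprocess_text_sketch(text, remove_space=True, keep_accents=False, do_lower_case=True):
    outputs = " ".join(text.strip().split()) if remove_space else text
    outputs = outputs.replace("``", '"').replace("''", '"')
    if not keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join(c for c in outputs if not unicodedata.combining(c))
    if do_lower_case:
        outputs = outputs.lower()
    return outputs
assert preprocess_text_sketch("  Héllo   ``World''  ") == 'hello "world"'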
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ : List[str] =logging.get_logger(__name__)
a__ : List[str] ={
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class snake_case ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] ="gptj"
SCREAMING_SNAKE_CASE_ : List[Any] ={
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=5_0_4_0_0 , n_positions=2_0_4_8 , n_embd=4_0_9_6 , n_layer=2_8 , n_head=1_6 , rotary_dim=6_4 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class snake_case_a ( OnnxConfigWithPast ):
"""simple docstring"""
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
__UpperCamelCase = super(__A , self ).generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
# We need to order the input in the way they appears in the forward()
__UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__UpperCamelCase = seqlen + 2
__UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCamelCase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
__UpperCamelCase = common_inputs['attention_mask']
if self.use_past:
__UpperCamelCase = ordered_inputs['attention_mask'].dtype
__UpperCamelCase = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 1_3
| 399 | 1 |
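# Pure-Python illustration of the `past_key_values` shape computed in
# `generate_dummy_inputs` above; the GPT-J-like numbers below are assumed for
# the example only.
batch, seqlen = 2, 8
num_attention_heads, hidden_size, num_layers = 16, 4096, 28
past_key_values_length = seqlen + 2
past_shape = (batch, num_attention_heads, past_key_values_length, hidden_size // num_attention_heads)
assert past_shape == (2, 16, 10, 256)
# One (key, value) pair of this shape is created per layer, so a full cache
# holds `num_layers` such pairs.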
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = 'xlm-roberta-xl'
    def __init__(self , vocab_size=250880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=10240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase_a ( OnnxConfig ):
@property
    def inputs(self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
 | 716 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
 | 686 | 0 |
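# A minimal sketch of simulating the "connection fails" mode without the
# `offline` helper used above: patch the transport layer so every request
# errors out. This assumes `requests` is installed; no network access occurs.
import requests
from unittest import mock
with mock.patch(
    "requests.adapters.HTTPAdapter.send",
    side_effect=requests.exceptions.ConnectionError("offline"),
):
    try:
        requests.request("GET", "https://huggingface.co")
    except requests.exceptions.ConnectionError:
        pass  # expected: the patched transport refuses to connect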
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 597 |
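# A stripped-down sketch of the lazy-import pattern above: attributes resolve
# to their submodule only on first access. This is illustrative only, not the
# real `_LazyModule` implementation.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, attr):
        module_name = self._class_to_module[attr]  # raises KeyError for unknown names
        module = importlib.import_module(module_name)
        return getattr(module, attr)
# Example: expose `json.dumps` lazily under a fake module name.
lazy = LazyModuleSketch("lazy_demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'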
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( SeqaSeqTrainer ):
"""simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset = None , eval_examples=None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"{metric_key_prefix}_" ):
                    metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"{metric_key_prefix}_" ):
                metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 408 | 0 |
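# A small re-derivation of the `speed_metrics` values consumed above (runtime,
# samples/sec, steps/sec). The function below is a sketch, not the transformers
# implementation; key names mirror the real ones.
import math
import time
def speed_metrics_sketch(prefix, start_time, num_samples, num_steps):
    runtime = time.time() - start_time
    return {
        f"{prefix}_runtime": round(runtime, 4),
        f"{prefix}_samples_per_second": round(num_samples / runtime, 3),
        f"{prefix}_steps_per_second": round(num_steps / runtime, 3),
    }
start = time.time()
time.sleep(0.01)  # ensure a nonzero runtime for the demo
metrics = speed_metrics_sketch("eval", start, num_samples=100, num_steps=math.ceil(100 / 8))
assert set(metrics) == {"eval_runtime", "eval_samples_per_second", "eval_steps_per_second"}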
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__UpperCAmelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class UpperCamelCase__ ( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool("""text-question-answering""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-question-answering""" , remote=True )
    def test_exact_match_arg( self ):
        '''simple docstring'''
        result = self.tool(TEXT , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_arg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(TEXT , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        result = self.tool(text=TEXT , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_kwarg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(text=TEXT , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
| 706 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase = """"""
__UpperCAmelCase = """"""
__UpperCAmelCase = """"""
__UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal)
def main():
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print("""Processing...""" )
    new_images, new_annos, paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''/{file_root}.jpg''' , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(new_images )} with {file_name}''' )
        annos_list = []
        for anno in new_annos[index]:
            obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj )
        with open(f'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ):
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list , anno_list , flip_type = 1 ):
    """simple docstring"""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char = 32 ):
    """simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 79 | 0 |
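# The bounding-box arithmetic used by `update_image_and_anno` above, isolated:
# for normalized YOLO boxes (class, x_center, y_center, w, h), a horizontal
# flip maps x -> 1 - x and a vertical flip maps y -> 1 - y. The helper name is
# illustrative.
def flip_bbox(bbox, flip_type=1):
    cls, x, y, w, h = bbox
    if flip_type == 1:  # horizontal
        return [cls, 1 - x, y, w, h]
    return [cls, x, 1 - y, w, h]  # vertical
assert flip_bbox([0, 0.25, 0.25, 0.1, 0.2], flip_type=1) == [0, 0.75, 0.25, 0.1, 0.2]
assert flip_bbox([0, 0.25, 0.25, 0.1, 0.2], flip_type=0) == [0, 0.25, 0.75, 0.1, 0.2]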
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * ' '}def {test_name}("""
    line_begin_regex = f"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * ' '}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
def main( correct_filename , fail_filename=None ):
    if fail_filename is not None:
        with open(fail_filename , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 20 |
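# Two small pieces of the script above, isolated: `defaultdict(int)` lets it
# bump a per-test counter without initializing keys, and the class/test
# anchors it scans for are plain string prefixes. Values are illustrative.
from collections import defaultdict
done_test = defaultdict(int)
_id = "tests/test_foo.py_FooTest_test_bar"
done_test[_id] += 1
assert done_test[_id] == 1 and done_test["unseen"] == 0  # missing keys default to 0
class_name, test_name = "FooTest", "test_bar"
assert f"class {class_name}(" == "class FooTest("
assert (4 * " " + f"def {test_name}(") == "    def test_bar("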
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
    def setUp( self) -> int:
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor , tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs) -> str:
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).tokenizer
    def get_image_processor( self , **kwargs) -> List[str]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
    def tearDown( self) -> Optional[int]:
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self) -> str:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self) -> str:
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , BlipImageProcessor)
    def test_image_processor( self) -> int:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer( self) -> List[str]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str , return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor( self) -> int:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode( self) -> Tuple:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names( self) -> List[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
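# The no-input guard exercised by `test_processor` above, in isolation:
# `pytest.raises` passes only when the callable actually raises. The toy
# stand-in below is illustrative; pytest itself is already imported by the
# test module above.
import pytest
def processor_sketch(text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    return {"ok": True}
with pytest.raises(ValueError):
    processor_sketch()
assert processor_sketch(text="hi") == {"ok": True}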
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 433 |
import argparse
import struct
import unittest
class SHAaaa :
'''simple docstring'''
    def __init__( self , data ) ->None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ) ->bytes:
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ) ->None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100_000_000
                # Compression
                S1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xff_fff_fff) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100_000_000
                S0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100_000_000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100_000_000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ) ->int:
        return 0xff_fff_fff & ((value << (32 - rotations)) | (value >> rotations))
class SHAaaaHashTest ( unittest.TestCase):
'''simple docstring'''
    def test_match_hashes( self ) ->None:
import hashlib
        data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main( ):
"""simple docstring"""
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 433 | 1 |
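# The 32-bit right-rotation primitive from the compression loop above,
# restated under a descriptive name and sanity-checked. The final assert
# cross-checks the class against hashlib and assumes SHAaaa is importable as
# defined above.
import hashlib
def rotr32(value, rotations):
    return 0xFF_FF_FF_FF & ((value << (32 - rotations)) | (value >> rotations))
assert rotr32(0x00000001, 1) == 0x80000000
assert rotr32(0x80000000, 31) == 0x00000001
assert SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()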
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=str , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=str , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=str , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=str , default='''data/dump''' , help='''The dump file prefix.''' )
    args = parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        data = fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(F'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 1_00_00
    start = time.time()
    for text in data:
        text = F'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('''Finished binarization''' )
    logger.info(F'''{len(data )} examples processed.''' )
    dp_file = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(dp_file , '''wb''' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 349 |
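# The dtype selection above in one line: token ids fit in uint16 whenever the
# vocabulary has fewer than 2**16 entries, halving the pickled size. The
# helper name and vocab sizes below are illustrative.
import numpy as np
def ids_dtype(vocab_size):
    return np.uint16 if vocab_size < (1 << 16) else np.int32
assert ids_dtype(30522) == np.uint16    # e.g. a BERT-sized vocab
assert ids_dtype(250880) == np.int32    # e.g. an XLM-R-XL-sized vocab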
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ELECTRA tokenizer (backed by HuggingFace's `tokenizers` library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
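# Hedged usage sketch (not from the original file): the fast tokenizer produces
# the special-token layout and token type ids defined above. The checkpoint name
# is real, but downloading it at runtime is assumed here.
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("first sentence", "second sentence")
#   # enc["input_ids"]      -> [CLS] first ... [SEP] second ... [SEP]
#   # enc["token_type_ids"] -> 0s over the first segment, 1s over the second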
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
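# Hedged usage sketch (not from the original file): the config defaults above
# can be checked offline, no checkpoint needed.
#
#   config = ImageGPTConfig()
#   assert config.vocab_size == 513              # 512 color clusters + 1 SOS token
#   assert config.n_positions == 1024            # 32 * 32 pixels
#   assert config.hidden_size == config.n_embd   # via attribute_map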
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a set of `DataLoader`s for the glue/mrpc dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
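# Hedged usage sketch (not from the original file): this script is meant to be
# launched through Accelerate with a DeepSpeed config; the file names below are
# hypothetical.
#
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 \
#       --performance_lower_bound 0.80 --output_dir ./results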
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)  # encode returns (quant, emb_loss, info)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint, or an uninitialized model if none is given
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
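# Hedged usage sketch (not from the original file): the checkpoint/config paths
# are the hypothetical defaults baked into load_vqgan above.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)                  # uses ./model_checkpoints/* defaults
#   x = torch.randn(1, 3, 256, 256, device=device)
#   x_rec = reconstruct_with_vqgan(x, vqgan)    # round-trip through the codebook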
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print conversions of sample decimal numbers to octal."""
print("\n2 in octal is:")
print(decimal_to_octal(2)) # = 2
print("\n8 in octal is:")
print(decimal_to_octal(8)) # = 10
print("\n65 in octal is:")
print(decimal_to_octal(65)) # = 101
print("\n216 in octal is:")
print(decimal_to_octal(216)) # = 330
print("\n512 in octal is:")
print(decimal_to_octal(512)) # = 1000
print("\n")
if __name__ == "__main__":
    main()
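# Hedged cross-check (not from the original): Python's built-in oct() agrees
# with decimal_to_octal for the sample values above, e.g.
#   oct(216) == "0o330" == decimal_to_octal(216)
#   oct(512) == "0o1000" == decimal_to_octal(512)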
def one_uppercase_variants(txt: str) -> list:
    """Return copies of `txt` with each alphabetic character uppercased in turn.

    (Function name reconstructed; the original identifier was lost.)
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
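# Hedged usage sketch (not from the original):
#   one_uppercase_variants("ab1c") -> ["Ab1c", "aB1c", "ab1C"]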
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test up to 3,317,044,064,679,887,385,961,981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
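# Hedged usage sketch (not from the original): filtering small primes.
if __name__ == "__main__":
    assert [p for p in range(2, 30) if miller_rabin(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]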
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
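# Hedged illustration (not from the original file): the URL-to-local-name trick
# used throughout the manager above keeps the last path component and makes any
# query string filesystem-safe. The URL below is hypothetical.
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.txt?rev=2"
assert urllib.parse.quote_plus(Path(url).name) == "train.txt%3Frev%3D2"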
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # NOTE: `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
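# Hedged usage sketch (not from the original file): aligning the template with a
# concrete schema so the generic ClassLabel picks up real class names.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   aligned = template.align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["neg", "pos"]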
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations first (denominations assumed ascending);
    see the note after the driver code below."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
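# Hedged note (not from the original): the greedy strategy above is optimal for
# canonical coin systems such as the Indian denominations used here, but not for
# arbitrary ones. Example: denominations [1, 3, 4] and value 6 give
#   greedy  -> [4, 1, 1]  (3 coins)
#   optimal -> [3, 3]     (2 coins)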
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return the given decimal as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm to find the greatest common divisor
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
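# Hedged cross-check (not from the original): fractions.Fraction agrees, e.g.
#   from fractions import Fraction
#   Fraction("6.25") == Fraction(25, 4) == Fraction(*decimal_to_fraction("6.25"))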
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
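# Hedged usage sketch (not from the original file): random-weight init and a
# single forward pass with the default config. The tiny input shapes are
# hypothetical.
#
#   unet = FlaxUNet2DConditionModel()
#   params = unet.init_weights(jax.random.PRNGKey(0))
#   sample = jnp.zeros((1, 4, 32, 32))                 # (batch, channels, h, w)
#   timesteps = jnp.ones((1,), dtype=jnp.int32)
#   context = jnp.zeros((1, 77, 1280))                 # encoder hidden states
#   out = unet.apply({"params": params}, sample, timesteps, context).sample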
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
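# Hedged usage sketch (not from the original file): the default config builds a
# ResNet backbone config automatically.
#
#   config = DetaConfig()
#   assert config.backbone_config.model_type == "resnet"
#   assert config.hidden_size == config.d_model == 256   # via the properties above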
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ChineseCLIPFeatureExtractor"]
UpperCamelCase = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
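# Hedged illustration (not from the original file): the JSON shapes the detection
# helper above looks for; the keys are the ones checked in the code, the values
# are hypothetical.
#
#   export SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   export SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'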
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of 1..n via backtracking."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
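# Hedged cross-check (not from the original): itertools yields the same k-subsets.
if __name__ == "__main__":
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]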
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : List[str] , A_ : List[Any]=100 , A_ : Any=13 , A_ : Dict=30 , A_ : Optional[int]=2 , A_ : str=3 , A_ : Tuple=True , A_ : str=True , A_ : Union[str, Any]=32 , A_ : int=5 , A_ : List[Any]=4 , A_ : Optional[Any]=37 , A_ : Any="gelu" , A_ : List[str]=0.1 , A_ : int=0.1 , A_ : Tuple=10 , A_ : int=0.02 , A_ : Tuple=3 , ) -> Optional[int]:
__snake_case = parent
__snake_case = vocab_size
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def lowercase ( self : Optional[Any] ) -> List[str]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase ( self : Optional[int] , A_ : str , A_ : str , A_ : List[str] ) -> List[Any]:
__snake_case = FlaxBeitModel(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : List[str] , A_ : Any , A_ : Any , A_ : str ) -> str:
__snake_case = FlaxBeitForMaskedImageModeling(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase ( self : str , A_ : List[Any] , A_ : Optional[int] , A_ : List[str] ) -> str:
__snake_case = self.type_sequence_label_size
__snake_case = FlaxBeitForImageClassification(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = FlaxBeitForImageClassification(A_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(A_ )
def lowercase ( self : Dict ) -> List[str]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowercase ( self : List[Any] ) -> None:
__snake_case = FlaxBeitModelTester(self )
__snake_case = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase ( self : Tuple ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase ( self : int ) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(A_ )
__snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase ( self : Union[str, Any] ) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(A_ , A_ )
__snake_case = model_class(A_ )
@jax.jit
def model_jitted(A_ : Union[str, Any] , **A_ : Union[str, Any] ):
return model(pixel_values=A_ , **A_ )
with self.subTest('''JIT Enabled''' ):
__snake_case = model_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__snake_case = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase ( self : str ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def lowercase ( self : str ) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def lowercase ( self : int ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
def SCREAMING_SNAKE_CASE ( ):
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
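

def _demo_jit_equivalence() -> None:
    # Hedged illustration appended to this test module (the helper itself is not
    # part of the original suite): the JIT test above hinges on jit-compiled and
    # eager execution of the same pure function agreeing, shown here on a toy case.
    import jax
    import jax.numpy as jnp

    def f(x):
        return jnp.tanh(x) * 2.0

    x = jnp.ones((2, 3))
    assert jnp.allclose(f(x), jax.jit(f)(x))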
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
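

def _demo_judge_point() -> None:
    # A minimal, hedged sanity check of the rules above; this helper is added
    # for illustration and is not part of the original module.
    # a live focus cell with exactly two live neighbours survives
    survives = [[True, True, False], [False, True, False], [False, False, False]]
    assert __judge_point(True, survives) is True
    # a dead focus cell with exactly three live neighbours is born
    born = [[True, True, False], [True, False, False], [False, False, False]]
    assert __judge_point(False, born) is True
    # a live focus cell with fewer than two live neighbours dies
    dies = [[False, False, False], [False, True, True], [False, False, False]]
    assert __judge_point(True, dies) is False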
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
A_ : List[Any] = int(sys.argv[1])
# main working structure of this module.
A_ : Union[str, Any] = create_canvas(canvas_size)
seed(c)
A_ , A_ : str = plt.subplots()
fig.show()
A_ : Union[str, Any] = ListedColormap(['w', 'k'])
try:
while True:
A_ : Optional[Any] = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass | 57 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 153 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 349 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: visit root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[Any]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    """Collect the values on the given level, scanning left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    """Collect the values on the given level, scanning right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Any]:
    """Zig-zag (spiral) level order: alternate the scan direction per level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
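

def _zigzag_example() -> None:
    # Hedged worked example (this helper is illustrative, not part of the original
    # module): on the sample tree built by make_tree(), levels alternate direction,
    # so the expected output is [[1], [3, 2], [4, 5]].
    assert zigzag(make_tree()) == [[1], [3, 2], [4, 5]]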
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 349 | 1 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every achievable total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) rolls strictly more than Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
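

def _one_die_each_example() -> None:
    # Hedged sanity check (this helper is hypothetical, added for illustration):
    # with one 4-sided die against one 6-sided die, Peter wins in 6 of the 24
    # equally likely outcomes, so the same counting scheme must yield exactly 0.25.
    peter = total_frequency_distribution(sides_number=4, dice_number=1)
    colin = total_frequency_distribution(sides_number=6, dice_number=1)
    wins = sum(peter[total] * sum(colin[1:total]) for total in range(1, 5))
    assert wins / (4 * 6) == 0.25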
if __name__ == "__main__":
print(F'''{solution() = }''')
| 93 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 284 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords: np.ndarray, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
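

def _demo_coordinate_rescaling() -> None:
    # Hedged sketch, not part of the processor: it replays the arithmetic from
    # `_normalize_coordinates` with plain numpy, assuming a 480x640 image whose
    # longest edge is resized to 1024 (a uniform 1.6x scale on both axes).
    old_h, old_w = 480, 640
    new_h, new_w = 768, 1024
    coords = np.array([[[100.0, 200.0]]])  # a single (x, y) prompt point
    coords[..., 0] = coords[..., 0] * (new_w / old_w)
    coords[..., 1] = coords[..., 1] * (new_h / old_h)
    assert np.allclose(coords, [[[160.0, 320.0]]])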
| 103 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
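

def _demo_wordpiece_greedy_match() -> None:
    # Hedged illustration of the greedy longest-match loop above; the toy vocab
    # and this helper function are invented for the example and are not part of
    # the tokenizer module.
    toy_vocab = {"a": 0, "ab": 1, "c": 2}
    demo_tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
    assert demo_tokenizer.tokenize("abc") == ["ab", "c"]  # "ab" wins over "a" (longest match first)
    assert demo_tokenizer.tokenize("abx") == ["ab", "<unk>"]  # unmatched char falls back to <unk>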
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: segment with jieba, then refine with the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 103 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Element-wise hyperbolic tangent via the logistic identity tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
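

def _demo_tangent_hyperbolic() -> None:
    # Hedged check (helper added for illustration): the identity above should
    # agree with numpy's built-in tanh to floating-point precision.
    vector = np.array([-1.0, 0.0, 1.0, 2.0])
    assert np.allclose(tangent_hyperbolic(vector), np.tanh(vector))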
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the underlying tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the underlying tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 708 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the Gray code sequence of bit strings recursively."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
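

def _demo_gray_code() -> None:
    # Hedged worked example (helper added for illustration): for 2 bits the string
    # sequence is ["00", "01", "11", "10"], i.e. the integers 0, 1, 3, 2, where
    # consecutive codes differ in exactly one bit.
    assert gray_code(2) == [0, 1, 3, 2]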
if __name__ == "__main__":
import doctest
doctest.testmod()
| 483 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead.")
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n")
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory.")

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
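

def _demo_inject_below_marker() -> None:
    # Hedged, self-contained sketch of the core idea behind the `replace` helper
    # above: stream a file and splice extra lines directly below a marker line.
    # The helper name and file contents are invented for illustration; the real
    # command additionally preserves permissions via a temp file.
    import tempfile

    demo_file = Path(tempfile.mkdtemp()) / "demo.txt"
    demo_file.write_text("first\n# MARKER\nlast\n")

    rewritten = []
    for line in demo_file.read_text().splitlines(keepends=True):
        rewritten.append(line)
        if "# MARKER" in line:
            rewritten.append("injected\n")
    demo_file.write_text("".join(rewritten))

    assert demo_file.read_text() == "first\n# MARKER\ninjected\nlast\n"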
| 61 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
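    # The check above is the standard KV-cache equivalence invariant: decoding
    # the appended tokens with past_key_values must reproduce (to ~1e-3) the
    # hidden states obtained by re-running the model on the full sequence.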
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by how many of its characters match the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
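# Worked example (a sketch): with random_slice == 2,
# crossover("ABCDE", "VWXYZ") returns ("ABXYZ", "VWCDE").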
def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pick second parents from the scored population and breed children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward target; return (generation, total population, best string)."""
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : Tuple = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 417 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
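# Lazy-import note (a sketch of the pattern above): importing the package only
# registers these names; the heavy torch-backed modules are imported the first
# time an attribute such as CLIPSegModel is actually accessed.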
| 417 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
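# Usage sketch (assuming the `transformers-cli` console entry point is installed):
#   transformers-cli env        # report the local environment for bug reports
#   transformers-cli download bert-base-cased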
| 51 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data into (less, equal, greater) vs the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items, or None if out of range."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
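# A minimal self-check (a sketch, not part of the original module):
if __name__ == "__main__":
    # index 3 asks for the 4th smallest element of the list
    assert quick_select([2, 4, 5, 7, 899, 54, 32], 3) == 7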
| 670 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
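# A quick self-check (a sketch, not part of the original script): one
# generation turns the vertical blinker into a horizontal bar.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]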
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 720 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
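    # Usage sketch (names as restored above):
    #   processor = MobileViTImageProcessor()
    #   batch = processor.preprocess(images=pil_image, return_tensors="pt")
    #   batch["pixel_values"].shape  # (1, 3, 256, 256) after the 256x256 centre crop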
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
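    # Post-processing sketch: passing target_sizes=[(480, 640)] upsamples the
    # low-resolution logits back to each original image size before the
    # per-pixel argmax that produces the segmentation map.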
| 432 | 0 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end with trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
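# Worked check (a sketch): because of the abs(), this returns the unsigned area
# between the curve and the x axis. For f(x) = x^3 + x^2 on [-5, 5] that is
# |F(-1) - F(-5)| + (F(5) - F(-1)) with F(x) = x^4/4 + x^3/3,
# i.e. 1376/12 + 2376/12 = 3752/12 ≈ 312.67, which the loop approaches as steps grow.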
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 323 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
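# Usage sketch (flags as defined in tpu_command_parser above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip list" --debug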
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 230 | 0 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division, truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
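# Example sketch: "2 1 + 3 *" is (2 + 1) * 3 in infix,
# so evaluate_postfix(["2", "1", "+", "3", "*"]) returns 9.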
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # ties within one frequency are broken by each letter's ETAOIN position
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
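# Sketch: for ordinary English text the head of get_frequency_order tracks
# "ETAOIN...", so english_freq_match_score tends toward its maximum of 12,
# while random letter soup scores far lower; this makes the score a useful
# fitness function when breaking simple substitution ciphers.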
if __name__ == "__main__":
import doctest
doctest.testmod()
| 663 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCamelCase : int = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
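    # Note on the inputs above: token_indices [2, 5] point at "cat" and "frog"
    # in the tokenized prompt (index 0 is the BOS token); Attend-and-Excite
    # maximises cross-attention on exactly these tokens while denoising.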
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A_ ( self : Any ) -> str:
"""simple docstring"""
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 663 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
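# Derived-width sketch: with the default embed_dim=96 and four stages,
# hidden_size = 96 * 2**3 = 768, the channel width after the final stage.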
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 710 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
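# Padding note (a sketch of the collate_fn logic above): pad_to_multiple_of=8
# (16 for fp8) aligns sequence lengths with tensor-core tile sizes, while on
# TPU every batch is padded to a fixed 128 tokens so XLA compiles one static shape.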
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 35 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
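    # RoCBert pairs every token id with a glyph-shape id and a pronunciation id
    # (used in its robustness pretraining); with the tiny identity mappings
    # built in setUp, all three id sequences coincide, as asserted above.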
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
"""simple docstring"""
a_ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ):
"""simple docstring"""
a_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a_ = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        a_ = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
a_ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
a_ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
a_ = tokenizer_r.encode_plus(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , )
                a_ = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
a_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ):
"""simple docstring"""
a_ = ['的', '人', '有']
    a_ = ''.join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ = True
a_ = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
a_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
a_ = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
a_ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
a_ = False
a_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
a_ = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
a_ = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
a_ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
a_ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ )
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def _a ( self ):
"""simple docstring"""
a_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
a_ = tokenizer.encode('你好' , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer.encode('你是谁' , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a_ = '你好,你是谁'
a_ = tokenizer.tokenize(UpperCamelCase__ )
a_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
a_ = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ )
a_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ )
a_ = tokenizer.prepare_for_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
a_ = tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
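# Illustrative helper (an assumption mirroring the three parallel maps built in setUp
# above): RoCBert pairs every token id with a glyph-shape id and a pronunciation id, so
# one token stream yields three aligned id sequences.
def _toy_roc_bert_ids(tokens, vocab, shape_map, pron_map, unk_id=0):
    return [
        (vocab.get(t, unk_id), shape_map.get(t, unk_id), pron_map.get(t, unk_id))
        for t in tokens
    ]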
| 536 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __SCREAMING_SNAKE_CASE (enum.Enum ):
"""simple docstring"""
_a : Tuple = 0
_a : List[str] = 1
_a : int = 2
@add_end_docstrings(__A )
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
_a : str = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a_ = None
if self.model.config.prefix is not None:
a_ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a_ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
            a_ , a_ , a_ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
a_ = {**self._preprocess_params, **preprocess_params}
a_ = {**self._forward_params, **forward_params}
def _a ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = {}
if prefix is not None:
a_ = prefix
if prefix:
            a_ = self.tokenizer(
                prefix , padding=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=self.framework )
a_ = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
' [None, \'hole\']' )
a_ = handle_long_generation
preprocess_params.update(UpperCamelCase__ )
a_ = generate_kwargs
a_ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
a_ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
a_ = ReturnType.TENSORS
if return_type is not None:
a_ = return_type
if clean_up_tokenization_spaces is not None:
a_ = clean_up_tokenization_spaces
if stop_sequence is not None:
a_ = self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
a_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*UpperCamelCase__ , **UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
a_ = self.tokenizer(
prefix + prompt_text , padding=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=self.framework )
a_ = prompt_text
if handle_long_generation == "hole":
a_ = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
a_ = generate_kwargs['max_new_tokens']
else:
a_ = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        " model's max length" )
a_ = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
a_ = inputs['attention_mask'][:, -keep_length:]
return inputs
def _a ( self , UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
a_ = model_inputs['input_ids']
a_ = model_inputs.get('attention_mask' , UpperCamelCase__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
a_ = None
a_ = None
a_ = 1
else:
a_ = input_ids.shape[0]
a_ = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a_ = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
a_ = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
a_ = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a_ = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
        a_ = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **UpperCamelCase__ )
a_ = generated_sequence.shape[0]
if self.framework == "pt":
            a_ = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            a_ = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _a ( self , UpperCamelCase__ , UpperCamelCase__=ReturnType.FULL_TEXT , UpperCamelCase__=True ):
"""simple docstring"""
a_ = model_outputs['generated_sequence'][0]
a_ = model_outputs['input_ids']
a_ = model_outputs['prompt_text']
a_ = generated_sequence.numpy().tolist()
a_ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a_ = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
                a_ = self.tokenizer.decode(
                    sequence , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a_ = 0
else:
a_ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , ) )
if return_type == ReturnType.FULL_TEXT:
a_ = prompt_text + text[prompt_length:]
else:
a_ = text[prompt_length:]
a_ = {'generated_text': all_text}
records.append(UpperCamelCase__ )
return records
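# Illustrative sketch of the "hole" strategy implemented above: keep only the most
# recent tokens so that prompt + generated tokens fit the model window. `input_ids`
# here is a plain list; the pipeline itself works on framework tensors instead.
def _sketch_hole_truncation(input_ids, model_max_length, max_new_tokens):
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("the number of desired tokens exceeds the model's max length")
    return input_ids[-keep_length:]  # drop the oldest tokens, keep the tail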
| 536 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowerCamelCase = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
__lowerCamelCase = None
def _a ( ):
a_ : Tuple = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__UpperCamelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__UpperCamelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _a ( __UpperCamelCase ):
a_ : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ : Union[str, Any] = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def _a ( __UpperCamelCase ):
def remove_articles(__UpperCamelCase ):
return ARTICLES_REGEX.sub(""" """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase ):
a_ : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def _a ( __UpperCamelCase ):
if not s:
return []
return normalize_answer(__UpperCamelCase ).split()
def _a ( __UpperCamelCase , __UpperCamelCase ):
return int(normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) )
def _a ( __UpperCamelCase , __UpperCamelCase ):
a_ : List[Any] = get_tokens(__UpperCamelCase )
a_ : str = get_tokens(__UpperCamelCase )
a_ : Tuple = collections.Counter(__UpperCamelCase ) & collections.Counter(__UpperCamelCase )
a_ : str = sum(common.values() )
if len(__UpperCamelCase ) == 0 or len(__UpperCamelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a_ : Any = 1.0 * num_same / len(__UpperCamelCase )
a_ : Optional[int] = 1.0 * num_same / len(__UpperCamelCase )
a_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def _a ( __UpperCamelCase , __UpperCamelCase ):
a_ : Tuple = {}
a_ : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ : str = qa["""id"""]
            a_ : Dict = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t )]
if not gold_answers:
                # For unanswerable questions, the only correct answer is the empty string
a_ : Union[str, Any] = [""""""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
a_ : int = preds[qid]
# Take max over all gold answers
            a_ : Union[str, Any] = max(compute_exact(a , a_pred ) for a in gold_answers )
            a_ : int = max(compute_fa(a , a_pred ) for a in gold_answers )
return exact_scores, fa_scores
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : Optional[Any] = {}
for qid, s in scores.items():
a_ : Optional[int] = na_probs[qid] > na_prob_thresh
if pred_na:
a_ : List[Any] = float(not qid_to_has_ans[qid] )
else:
a_ : List[Any] = s
return new_scores
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
if not qid_list:
a_ : List[Any] = len(__UpperCamelCase )
return collections.OrderedDict(
[
("""exact""", 1_00.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_00.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
a_ : Tuple = len(__UpperCamelCase )
return collections.OrderedDict(
[
("""exact""", 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for k in new_eval:
a_ : Any = new_eval[k]
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
plt.step(__UpperCamelCase , __UpperCamelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__UpperCamelCase , __UpperCamelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__UpperCamelCase )
plt.savefig(__UpperCamelCase )
plt.clf()
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None ):
    a_ : List[Any] = sorted(__UpperCamelCase , key=lambda k : na_probs[k] )
a_ : int = 0.0
a_ : Tuple = 1.0
a_ : Union[str, Any] = 0.0
a_ : List[str] = [1.0]
a_ : int = [0.0]
a_ : Optional[int] = 0.0
for i, qid in enumerate(__UpperCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a_ : Union[str, Any] = true_pos / float(i + 1 )
a_ : Tuple = true_pos / float(__UpperCamelCase )
if i == len(__UpperCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__UpperCamelCase )
recalls.append(__UpperCamelCase )
if out_image:
plot_pr_curve(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return {"ap": 1_00.0 * avg_prec}
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if out_image_dir and not os.path.exists(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
a_ : Any = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a_ : Union[str, Any] = make_precision_recall_eval(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , out_image=os.path.join(__UpperCamelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
a_ : Any = make_precision_recall_eval(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , out_image=os.path.join(__UpperCamelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
    a_ : int = {k: float(v ) for k, v in qid_to_has_ans.items()}
a_ : Dict = make_precision_recall_eval(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , out_image=os.path.join(__UpperCamelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__UpperCamelCase , __UpperCamelCase , """pr_exact""" )
merge_eval(__UpperCamelCase , __UpperCamelCase , """pr_f1""" )
merge_eval(__UpperCamelCase , __UpperCamelCase , """pr_oracle""" )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if not qid_list:
return
a_ : List[Any] = [na_probs[k] for k in qid_list]
    a_ : List[Any] = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__UpperCamelCase , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a_ : Tuple = num_no_ans
a_ : List[Any] = cur_score
a_ : List[Any] = 0.0
    a_ : int = sorted(__UpperCamelCase , key=lambda k : na_probs[k] )
for i, qid in enumerate(__UpperCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a_ : Optional[Any] = scores[qid]
else:
if preds[qid]:
a_ : Optional[int] = -1
else:
a_ : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
a_ : str = cur_score
a_ : int = na_probs[qid]
return 1_00.0 * best_score / len(__UpperCamelCase ), best_thresh
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ , a_ : Optional[int] = find_best_thresh(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
a_ , a_ : Optional[Any] = find_best_thresh(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
a_ : Dict = best_exact
a_ : Optional[Any] = exact_thresh
a_ : Optional[Any] = best_fa
a_ : Union[str, Any] = fa_thresh
def _a ( ):
with open(OPTS.data_file ) as f:
        a_ : str = json.load(f )
        a_ : Optional[int] = dataset_json["""data"""]
    with open(OPTS.pred_file ) as f:
        a_ : Dict = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            a_ : List[str] = json.load(f )
    else:
        a_ : List[str] = {k: 0.0 for k in preds}
    a_ : Any = make_qid_to_has_ans(dataset ) # maps qid to True/False
    a_ : Any = [k for k, v in qid_to_has_ans.items() if v]
    a_ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
    a_ , a_ : List[str] = get_raw_scores(dataset , preds )
    a_ : int = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    a_ : Optional[int] = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    a_ : List[str] = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        a_ : Dict = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , """HasAns""" )
    if no_ans_qids:
        a_ : List[str] = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , """NoAns""" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , """hasAns""" )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , """noAns""" )
    if OPTS.out_file:
        with open(OPTS.out_file , """w""" ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
__lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
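# De-obfuscated mirror of the token-level F1 computed above (a readability sketch:
# whitespace tokenization only, without the article/punctuation normalization step).
def _sketch_token_f1(a_gold, a_pred):
    gold_toks, pred_toks = a_gold.split(), a_pred.split()
    if not gold_toks or not pred_toks:
        # If either is no-answer, F1 is 1 when both are empty, else 0.
        return float(gold_toks == pred_toks)
    common = sum(min(gold_toks.count(t), pred_toks.count(t)) for t in set(pred_toks))
    if common == 0:
        return 0.0
    precision, recall = common / len(pred_toks), common / len(gold_toks)
    return 2 * precision * recall / (precision + recall)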
| 478 |
from heapq import heappop, heappush
import numpy as np
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
a_ , a_ : Any = grid.shape
a_ : Dict = [-1, 1, 0, 0]
a_ : List[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ : int = [(0, source)], set()
    a_ : Optional[Any] = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    a_ : Optional[Any] = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
while queue:
        ((a_) , (a_)) : Any = heappop(queue )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ : Tuple = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ : Any = predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
        for i in range(len(dx ) ):
            a_ , a_ : Dict = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                a_ : List[Any] = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
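# Illustrative companion to the neighbor expansion above: yields the valid 4- or
# 8-connected neighbors of (x, y) inside a rows x cols grid.
def _sketch_neighbors(x, y, rows, cols, allow_diagonal=False):
    dx = [-1, 1, 0, 0] + ([-1, -1, 1, 1] if allow_diagonal else [])
    dy = [0, 0, -1, 1] + ([-1, 1, -1, 1] if allow_diagonal else [])
    for i in range(len(dx)):
        nx, ny = x + dx[i], y + dy[i]
        if 0 <= nx < rows and 0 <= ny < cols:
            yield nx, ny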
| 478 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _lowerCAmelCase(a : Dict , a : Union[str, Any]=None ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE =None
if token is not None:
_SCREAMING_SNAKE_CASE ={'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
_SCREAMING_SNAKE_CASE =f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_SCREAMING_SNAKE_CASE =requests.get(a , headers=a ).json()
_SCREAMING_SNAKE_CASE ={}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
_SCREAMING_SNAKE_CASE =math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(a ):
_SCREAMING_SNAKE_CASE =requests.get(url + f"""&page={i + 2}""" , headers=a ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _lowerCAmelCase(a : int , a : List[Any]=None ) -> List[str]:
_SCREAMING_SNAKE_CASE =None
if token is not None:
_SCREAMING_SNAKE_CASE ={'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
_SCREAMING_SNAKE_CASE =f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_SCREAMING_SNAKE_CASE =requests.get(a , headers=a ).json()
_SCREAMING_SNAKE_CASE ={}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
_SCREAMING_SNAKE_CASE =math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(a ):
_SCREAMING_SNAKE_CASE =requests.get(url + f"""&page={i + 2}""" , headers=a ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _lowerCAmelCase(a : Tuple , a : Tuple , a : Tuple , a : List[str] ) -> List[Any]:
_SCREAMING_SNAKE_CASE =None
if token is not None:
_SCREAMING_SNAKE_CASE ={'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
_SCREAMING_SNAKE_CASE =requests.get(a , headers=a , allow_redirects=a )
_SCREAMING_SNAKE_CASE =result.headers['''Location''']
_SCREAMING_SNAKE_CASE =requests.get(a , allow_redirects=a )
_SCREAMING_SNAKE_CASE =os.path.join(a , f"""{artifact_name}.zip""" )
with open(a , '''wb''' ) as fp:
fp.write(response.content )
def _lowerCAmelCase(a : List[str] , a : List[str]=None ) -> Optional[int]:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =None
with zipfile.ZipFile(a ) as z:
for filename in z.namelist():
if not os.path.isdir(a ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a ) as f:
for line in f:
_SCREAMING_SNAKE_CASE =line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_SCREAMING_SNAKE_CASE =line[: line.index(''': ''' )]
_SCREAMING_SNAKE_CASE =line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
_SCREAMING_SNAKE_CASE =line[len('''FAILED ''' ) :]
failed_tests.append(a )
elif filename == "job_name.txt":
_SCREAMING_SNAKE_CASE =line
if len(a ) != len(a ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(a )} for `errors` """
f"""and {len(a )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
''' problem.''' )
_SCREAMING_SNAKE_CASE =None
if job_name and job_links:
_SCREAMING_SNAKE_CASE =job_links.get(a , a )
# A list with elements of the form (line of error, error, failed test)
    _SCREAMING_SNAKE_CASE =[x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
return result
def _lowerCAmelCase(a : Optional[int] , a : Tuple=None ) -> Dict:
_SCREAMING_SNAKE_CASE =[]
    _SCREAMING_SNAKE_CASE =[os.path.join(a , p ) for p in os.listdir(a ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a , job_links=a ) )
return errors
def _lowerCAmelCase(a : Union[str, Any] , a : List[Any]=None ) -> List[Any]:
_SCREAMING_SNAKE_CASE =Counter()
counter.update([x[1] for x in logs] )
_SCREAMING_SNAKE_CASE =counter.most_common()
_SCREAMING_SNAKE_CASE ={}
for error, count in counts:
if error_filter is None or error not in error_filter:
_SCREAMING_SNAKE_CASE ={'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    _SCREAMING_SNAKE_CASE =dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _lowerCAmelCase(a : Optional[int] ) -> int:
_SCREAMING_SNAKE_CASE =test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
_SCREAMING_SNAKE_CASE =test.split('''/''' )[2]
else:
_SCREAMING_SNAKE_CASE =None
return test
def _lowerCAmelCase(a : str , a : Any=None ) -> str:
_SCREAMING_SNAKE_CASE =[(x[0], x[1], get_model(x[2] )) for x in logs]
_SCREAMING_SNAKE_CASE =[x for x in logs if x[2] is not None]
_SCREAMING_SNAKE_CASE ={x[2] for x in logs}
_SCREAMING_SNAKE_CASE ={}
for test in tests:
_SCREAMING_SNAKE_CASE =Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_SCREAMING_SNAKE_CASE =counter.most_common()
_SCREAMING_SNAKE_CASE ={error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_SCREAMING_SNAKE_CASE =sum(error_counts.values() )
if n_errors > 0:
_SCREAMING_SNAKE_CASE ={'''count''': n_errors, '''errors''': error_counts}
    _SCREAMING_SNAKE_CASE =dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _lowerCAmelCase(a : Tuple ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE ='''| no. | error | status |'''
_SCREAMING_SNAKE_CASE ='''|-:|:-|:-|'''
_SCREAMING_SNAKE_CASE =[header, sep]
for error in reduced_by_error:
_SCREAMING_SNAKE_CASE =reduced_by_error[error]['''count''']
_SCREAMING_SNAKE_CASE =f"""| {count} | {error[:100]} | |"""
lines.append(a )
return "\n".join(a )
def _lowerCAmelCase(a : List[Any] ) -> int:
_SCREAMING_SNAKE_CASE ='''| model | no. of errors | major error | count |'''
_SCREAMING_SNAKE_CASE ='''|-:|-:|-:|-:|'''
_SCREAMING_SNAKE_CASE =[header, sep]
for model in reduced_by_model:
_SCREAMING_SNAKE_CASE =reduced_by_model[model]['''count''']
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =list(reduced_by_model[model]['''errors'''].items() )[0]
_SCREAMING_SNAKE_CASE =f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(a )
return "\n".join(a )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
UpperCAmelCase_ : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCAmelCase_ : Dict = get_job_links(args.workflow_run_id, token=args.token)
UpperCAmelCase_ : Optional[int] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCAmelCase_ : List[Any] = k.find(''' / ''')
UpperCAmelCase_ : Dict = k[index + len(''' / ''') :]
UpperCAmelCase_ : Tuple = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCAmelCase_ : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCAmelCase_ : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCAmelCase_ : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCAmelCase_ : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCAmelCase_ : str = reduce_by_error(errors)
UpperCAmelCase_ : Union[str, Any] = reduce_by_model(errors)
UpperCAmelCase_ : Optional[Any] = make_github_table(reduced_by_error)
UpperCAmelCase_ : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
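# Tiny worked example of the reduce_by_error grouping above. Each log entry is a
# (error_line, error, failed_test) triple; identical error strings are counted together,
# e.g. _sketch_error_counts([("l1", "OSError", "t1"), ("l2", "OSError", "t2")])
# returns [("OSError", 2)].
def _sketch_error_counts(logs):
    return Counter(error for _line, error, _test in logs).most_common()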
| 255 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : Tuple = '''Muhammad Umer Farooq'''
UpperCAmelCase_ : List[str] = '''MIT'''
UpperCAmelCase_ : Any = '''1.0.0'''
UpperCAmelCase_ : int = '''Muhammad Umer Farooq'''
UpperCAmelCase_ : Optional[int] = '''[email protected]'''
UpperCAmelCase_ : Tuple = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =domain
def UpperCamelCase_ ( self , _A , _A ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        _SCREAMING_SNAKE_CASE =parse.urljoin(self.domain , value )
                        self.urls.append(url )
def _lowerCAmelCase(a : str ) -> str:
return ".".join(get_sub_domain_name(a ).split('''.''' )[-2:] )
def _lowerCAmelCase(a : str ) -> str:
return parse.urlparse(a ).netloc
def _lowerCAmelCase(a : str = "https://github.com" ) -> list[str]:
_SCREAMING_SNAKE_CASE =get_domain_name(a )
# Initialize the parser
_SCREAMING_SNAKE_CASE =Parser(a )
try:
# Open URL
_SCREAMING_SNAKE_CASE =requests.get(a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_SCREAMING_SNAKE_CASE =set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
                _SCREAMING_SNAKE_CASE =requests.get(link )
# Get the valid email.
_SCREAMING_SNAKE_CASE =re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
                valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = emails_from_url('''https://github.com''')
print(f"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
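# Self-contained sketch of the extraction step above; note the added re.escape, which
# keeps dots in the domain from acting as regex wildcards (the pattern above matches
# them as "any character").
def _sketch_find_domain_emails(text, domain):
    return sorted(set(re.findall(r"[a-zA-Z0-9]+@" + re.escape(domain), text)))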
| 255 | 1 |
from __future__ import annotations
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : list[str] | None = None , UpperCamelCase_ : dict[str, float] | None = None , UpperCamelCase_ : bool = False , ) -> tuple[int, float, str]:
"""simple docstring"""
    lowerCAmelCase__ = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCAmelCase__ = {
'a': 0.0_8497,
'b': 0.0_1492,
'c': 0.0_2202,
'd': 0.0_4253,
'e': 0.1_1162,
'f': 0.0_2228,
'g': 0.0_2015,
'h': 0.0_6094,
'i': 0.0_7546,
'j': 0.0_0153,
'k': 0.0_1292,
'l': 0.0_4025,
'm': 0.0_2406,
'n': 0.0_6749,
'o': 0.0_7507,
'p': 0.0_1929,
'q': 0.0_0095,
'r': 0.0_7587,
's': 0.0_6327,
't': 0.0_9356,
'u': 0.0_2758,
'v': 0.0_0978,
'w': 0.0_2560,
'x': 0.0_0150,
'y': 0.0_1994,
'z': 0.0_0077,
}
else:
# Custom frequencies dictionary
lowerCAmelCase__ = frequencies_dict
if not case_sensitive:
lowerCAmelCase__ = ciphertext.lower()
# Chi squared statistic values
lowerCAmelCase__ = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
lowerCAmelCase__ = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                lowerCAmelCase__ = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCAmelCase__ = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    lowerCAmelCase__ = decrypted_with_shift.lower().count(letter )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    lowerCAmelCase__ = decrypted_with_shift.count(letter )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    lowerCAmelCase__ = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    lowerCAmelCase__ , lowerCAmelCase__ = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
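# Standard form of the statistic accumulated above: for each distinct letter,
# chi^2 += (observed - expected)^2 / expected, with expected = frequency * len(text).
# (The loop above adds each letter's term once per occurrence; this sketch uses the
# textbook once-per-distinct-letter form.)
def _sketch_chi_squared(text, frequencies):
    total = 0.0
    for letter in set(text):
        if letter in frequencies:
            observed = text.count(letter)
            expected = frequencies[letter] * len(text)
            total += (observed - expected) ** 2 / expected
    return total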
| 717 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCamelCase__ ( *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
pass
def _UpperCamelCase ( UpperCamelCase_ : Tuple ) -> Any:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__snake_case : List[str] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
_SCREAMING_SNAKE_CASE : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model=_UpperCamelCase , tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = INVOICE_URL
        lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
lowerCAmelCase__ = 'What is the placebo?'
lowerCAmelCase__ = [
{
                'image': load_image(image ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = dqa_pipeline(_UpperCamelCase , top_k=2 )
self.assertEqual(
_UpperCamelCase , [
[
{'score': ANY(_UpperCamelCase ), 'answer': ANY(_UpperCamelCase ), 'start': ANY(_UpperCamelCase ), 'end': ANY(_UpperCamelCase )},
{'score': ANY(_UpperCamelCase ), 'answer': ANY(_UpperCamelCase ), 'start': ANY(_UpperCamelCase ), 'end': ANY(_UpperCamelCase )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'How many cats are there?'
lowerCAmelCase__ = [
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , _UpperCamelCase )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , _UpperCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail and
        # probably return an empty answer.
lowerCAmelCase__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(_UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , words=_UpperCamelCase , boxes=_UpperCamelCase , top_k=2 )
self.assertEqual(_UpperCamelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCamelCase )
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCamelCase , revision='3dc6de3' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
        lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCamelCase )
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCamelCase , revision='3dc6de3' , max_seq_len=50 , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
        lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
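# Illustrative helper for the word_boxes used above: LayoutLM-style models expect each
# box scaled to a 0-1000 coordinate system, which is what apply_tesseract produces from
# pixel coordinates (width/height are the image dimensions; values are assumptions for
# illustration, not pinned to this test file).
def _sketch_normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]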
| 365 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _lowerCamelCase( ):
raise RuntimeError("CUDA out of memory." )
class snake_case__ ( nn.Module ):
def __init__( self ):
super().__init__()
__a = nn.Linear(3 , 4 )
        __a = nn.BatchNorm1d(4 )
__a = nn.Linear(4 , 5 )
def a__ ( self , lowerCamelCase ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase ) ) )
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
def a__ ( self ):
__a = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase , lowerCamelCase ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__a , __a = mock_training_loop_function("hello" )
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def a__ ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase ):
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def a__ ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def a__ ( self ):
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def a__ ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def a__ ( self ):
__a = torch.cuda.memory_allocated()
__a = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase )
__a = release_memory(lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase )
| 528 | 528 | 1 |
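# A minimal usage sketch (not from the test file above) of the API exercised by these
# tests: `find_executable_batch_size` wraps a training function whose first argument is
# the batch size, and halves it on CUDA out-of-memory errors until the function succeeds.
# The `train_one_epoch` callable here is a hypothetical placeholder.
from accelerate.utils.memory import find_executable_batch_size


def run_with_auto_batch_size(train_one_epoch):
    @find_executable_batch_size(starting_batch_size=64)
    def inner(batch_size):
        # batch_size starts at 64 and is halved whenever a CUDA OOM RuntimeError escapes
        return train_one_epoch(batch_size)

    return inner()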
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 720 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiple matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 0 |
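# A standalone sketch of calling `power_iteration` above on a small symmetric matrix;
# the dominant eigenvalue should agree with `np.linalg.eigh`. Illustrative only.
import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 3.0]])
seed_vector = np.array([1.0, 0.0])
eigen_value, eigen_vector = power_iteration(matrix, seed_vector)
print(eigen_value)  # ~3.618, the largest eigenvalue (5 + sqrt(5)) / 2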
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 108 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 503 | 0 |
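# A standalone sketch exercising the projection module above with dummy tensors, to show
# the output shapes. It assumes the class is importable from the module above; the
# dimensions (batch 2, sequence 77) are arbitrary illustrative choices.
import torch

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=512, cross_attention_dim=64
)
image_embeddings = torch.randn(2, 768)
prompt_embeds = torch.randn(2, 768)
text_encoder_hidden_states = torch.randn(2, 77, 768)
hidden_states, time_embeds = proj(
    image_embeddings=image_embeddings,
    prompt_embeds=prompt_embeds,
    text_encoder_hidden_states=text_encoder_hidden_states,
    do_classifier_free_guidance=False,
)
print(hidden_states.shape)  # (2, 4 + 77, 64): the 4 extra context tokens are prepended
print(time_embeds.shape)    # (2, 512): added to the existing timestep embedding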
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 715 |
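# A hypothetical concrete command illustrating the abstract interface above:
# `register_subcommand` wires up argparse flags on the subparsers object it receives,
# and `run` does the work. The command name and behavior are made up for the example.
from argparse import ArgumentParser


class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info goes here")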
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 596 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
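# A quick standalone sketch of the helper above: with keep_aspect_ratio=True and
# multiple=32, the image is scaled as little as possible toward the target size and
# each dimension is snapped to a multiple of 32. The value in the comment is what
# the function computes for this input.
import numpy as np

image = np.zeros((3, 480, 640))  # (channels, height, width)
print(get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32))  # (384, 512)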
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 61 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 30 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
| 718 |
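# A small illustrative sketch of the label-padding trick used in the collator above:
# padded label positions (attention_mask == 0) are set to -100 so the loss ignores them.
import torch

labels = torch.tensor([[5, 9, 2, 0], [7, 0, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 0, 0, 0]])
masked = labels.masked_fill(attention_mask.ne(1), -100)
print(masked)  # tensor([[   5,    9,    2, -100], [   7, -100, -100, -100]])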
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    G5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(G1, max_node)
    check_euler(G2, max_node)
    check_euler(G3, max_node)
    check_euler(G4, max_node)
    check_euler(G5, max_node)


if __name__ == "__main__":
    main()
| 72 | 0 |
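# The parity rule used by `check_circuit_or_path` above, shown directly: a connected
# graph has an Euler cycle iff every vertex has even degree, and an Euler path iff
# exactly two vertices have odd degree. Illustrative check on G1:
graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd = [node for node, neighbors in graph.items() if len(neighbors) % 2 == 1]
print(odd)  # [1, 5] -> two odd-degree vertices, so an Euler path exists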
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
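    # Illustrative note (not part of the original file): the pair layout produced above is the
    # RoBERTa-style `<s> A </s></s> B </s>`, so token_ids_0=[7, 8] and token_ids_1=[9] give
    # [bos_token_id, 7, 8, eos_token_id, eos_token_id, 9, eos_token_id].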
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]


# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in one pass (Dijkstra's three-way partitioning)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
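
# Illustrative run (assuming the module is used as-is):
#   dutch_national_flag_sort([2, 0, 1, 2, 0])  ->  [0, 0, 1, 2, 2]
# The single left-to-right pass gives O(n) time and O(1) extra space.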
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
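    # Probing note (explanatory comment, not in the original source): the generator above yields at
    # most len(self._buckets) linear-probe positions starting at the key's home bucket, so lookups
    # terminate even when no bucket is free.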
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
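

# Minimal usage sketch (illustrative; relies only on the MutableMapping interface defined above):
#   hm = HashMap()
#   hm["answer"] = 42          # __setitem__ resizes once the load factor is exceeded
#   assert hm["answer"] == 42  # __getitem__ walks the linear-probe sequence
#   del hm["answer"]           # __delitem__ leaves a _deleted tombstone so later probes still work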
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
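    # Explanatory note (an assumption about the shared `_LazyModule` helper): swapping the module
    # in `sys.modules` for this proxy defers the heavy torch/TF/Flax imports declared above until
    # an attribute such as `DistilBertModel` is first accessed.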
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
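    # Explanatory note (not from the original source): the tiny default vocab (99) is too small for
    # the tokenizers used by the pipeline tests, so it is bumped to 300 for that configuration.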
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
'''Pure-Python implementation of the SHA-256 hash algorithm.'''
import argparse
import struct
import unittest
class SHA256:
    '''Computes the SHA-256 digest of a bytes message.'''

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
        # Initialize round constants
        self.round_constants = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
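    # Worked example (illustrative): for the 11-byte message b"Test String" the padding above is
    # 0x80 followed by 44 zero bytes, and the packed length is the 8-byte big-endian integer 88
    # (= 11 * 8 bits), so the preprocessed message is exactly one 64-byte block.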
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
# Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
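    # Quick sanity check of the rotation (illustrative): self.ror(0x12345678, 8) == 0x78123456,
    # i.e. the low byte wraps around to the top of the 32-bit word.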
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
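
# Minimal sketch of the quantization step (explanatory, not part of the original file): with
# clusters = np.array([[0, 0, 0], [255, 255, 255]]), a mid-grey pixel [100, 100, 100] maps to
# index 0 because its squared euclidean distance to black (30000) is smaller than to white (72075).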
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
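
# For instance (reading off the parametrization above): 10 shards over 3 jobs yields contiguous
# ranges of sizes 4, 3 and 3; the single leftover shard is absorbed by the first job.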
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
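    # Note (explanatory, not from the original): every component above is deliberately tiny
    # (32-channel UNet, 5-layer CLIP text encoder, 1000-token vocab) so the fast tests can
    # instantiate and run the full pipeline on CPU in seconds.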
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Tuple = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
__lowerCAmelCase : Optional[int] = '''CIDAS/clipseg-rd64-refined'''
__lowerCAmelCase : Optional[Any] = '''image_segmenter'''
__lowerCAmelCase : int = CLIPSegForImageSegmentation
__lowerCAmelCase : List[str] = ['''image''', '''text''']
__lowerCAmelCase : Union[str, Any] = ['''image''']
def __init__( self : List[Any] , *a : Optional[int] , **a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*a , **a )
def _lowerCAmelCase ( self : Optional[int] , a : "Image" , a : str ) -> int:
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=a , return_tensors='''pt''' )
def _lowerCAmelCase ( self : int , a : Dict ) -> Dict:
"""simple docstring"""
with torch.no_grad():
lowercase = self.model(**a ).logits
return logits
def _lowerCAmelCase ( self : Union[str, Any] , a : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase = outputs.cpu().detach().numpy()
lowercase = 0
lowercase = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 706 |
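# A hedged, self-contained sketch of using CLIPSeg directly for the same job as the
# tool above (its class names are obfuscated here); the model id matches the tool's
# default checkpoint, and thresholding the logits at 0 mirrors the decode() step:
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (352, 352))   # stand-in for a real photo
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
array = logits.squeeze().cpu().numpy()
mask = Image.fromarray(((array > 0) * 255).astype("uint8"))   # binary segmentation mask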
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class _lowerCAmelCase :
def __init__( self : int , a : list[tuple[float, float]] ) -> List[str]:
"""simple docstring"""
lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowercase = len(a ) - 1
def _lowerCAmelCase ( self : str , a : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a ) , 5 ) == 1
return output_values
def _lowerCAmelCase ( self : Optional[Any] , a : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase = self.basis_function(a )
lowercase = 0.0
lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _lowerCAmelCase ( self : Tuple , a : float = 0.01 ) -> int:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
lowercase = [] # x coordinates of points to plot
lowercase = [] # y coordinates of points to plot
lowercase = 0.0
while t <= 1:
lowercase = self.bezier_curve_function(a )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowercase = [i[0] for i in self.list_of_points]
lowercase = [i[1] for i in self.list_of_points]
plt.plot(
a , a , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(a , a , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 396 | 0 |
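# For comparison, the same curve can be evaluated without scipy via de Casteljau's
# algorithm, which repeatedly interpolates between neighbouring control points. A
# minimal sketch, separate from the class above:
def de_casteljau(points: list[tuple[float, float]], t: float) -> tuple[float, float]:
    assert 0 <= t <= 1, "Time t must be between 0 and 1."
    pts = [list(p) for p in points]
    while len(pts) > 1:   # collapse the control polygon one level per pass
        pts = [
            [(1 - t) * a[0] + t * b[0], (1 - t) * a[1] + t * b[1]]
            for a, b in zip(pts, pts[1:])
        ]
    return (pts[0][0], pts[0][1])

assert de_casteljau([(0, 0), (5, 5), (5, 0)], 0.5) == (3.75, 2.5)   # matches the Bernstein form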
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        __lowercase = row_ids.to(torch.float32 )
        __lowercase = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
        if image.dtype == np.uint8:
            __lowercase = image.astype(np.float32 )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
        __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
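# A small self-contained check of the unfold-based patch extraction above: a 1x1x4x4
# image with 2x2 patches should yield a (rows, cols, depth) = (2, 2, 4) grid whose
# top-left entry is the flattened top-left window:
import torch

image = torch.arange(16.0).reshape(1, 1, 4, 4)
patches = torch.nn.functional.unfold(image, (2, 2), stride=(2, 2))   # (1, 4, num_patches=4)
patches = patches.reshape(1, 1, 2, 2, -1).permute(0, 4, 2, 3, 1)     # same moves as above
patches = patches.reshape(4 // 2, 4 // 2, 1 * 2 * 2)
assert patches.shape == (2, 2, 4)
assert patches[0, 0].tolist() == [0.0, 1.0, 4.0, 5.0]                # top-left 2x2 window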
| 41 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
lowercase_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class __UpperCamelCase :
lowercase_ : Optional[str] = field(default=UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
if self.train_file is not None:
lowerCAmelCase :int = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCAmelCase :int = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __UpperCamelCase :
lowercase_ : PreTrainedTokenizerBase
lowercase_ : Union[bool, str, PaddingStrategy] = True
lowercase_ : Optional[int] = None
lowercase_ : Optional[int] = None
def __call__( self : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase :Dict = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase :Any = [feature.pop(UpperCAmelCase ) for feature in features]
lowerCAmelCase :int = len(UpperCAmelCase )
lowerCAmelCase :int = len(features[0]['input_ids'] )
lowerCAmelCase :Optional[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase )] for feature in features
]
lowerCAmelCase :Optional[Any] = list(chain(*UpperCAmelCase ) )
lowerCAmelCase :Any = self.tokenizer.pad(
UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
lowerCAmelCase :Optional[int] = {k: v.view(UpperCAmelCase , UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
        lowerCAmelCase :List[Any] = torch.tensor(UpperCAmelCase , dtype=torch.int64 )
return batch
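# A tiny shape check of the flatten/un-flatten trick in __call__ above: 2 examples with
# 4 choices each are padded as 8 flat rows, then viewed back to (batch, num_choices,
# seq_len) before the labels are re-attached. (Illustration only; real sizes come from
# the tokenizer.)
import torch

flat_rows = torch.zeros(2 * 4, 8, dtype=torch.long)   # what the pad() call returns per key
unflattened = flat_rows.view(2, 4, -1)                # the v.view(batch_size, num_choices, -1) step
assert unflattened.shape == (2, 4, 8)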
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , a__ , a__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase :str = training_args.get_process_log_level()
logger.setLevel(a__ )
datasets.utils.logging.set_verbosity(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase :Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase :Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase :Union[str, Any] = {}
if data_args.train_file is not None:
lowerCAmelCase :Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase :str = data_args.validation_file
lowerCAmelCase :List[str] = data_args.train_file.split('.' )[-1]
lowerCAmelCase :Optional[int] = load_dataset(
a__ , data_files=a__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase :Dict = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase :Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase :Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase :List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase :Any = [F"""ending{i}""" for i in range(4 )]
lowerCAmelCase :str = 'sent1'
lowerCAmelCase :Optional[int] = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase :Union[str, Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase :Tuple = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase :str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(a__ ):
lowerCAmelCase :int = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase :List[str] = examples[question_header_name]
lowerCAmelCase :Tuple = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a__ )
]
# Flatten out
lowerCAmelCase :Any = list(chain(*a__ ) )
lowerCAmelCase :Dict = list(chain(*a__ ) )
# Tokenize
lowerCAmelCase :str = tokenizer(
a__ , a__ , truncation=a__ , max_length=a__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(a__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowerCAmelCase :Optional[int] = raw_datasets['train']
if data_args.max_train_samples is not None:
lowerCAmelCase :int = min(len(a__ ) , data_args.max_train_samples )
lowerCAmelCase :int = train_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase :Dict = train_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowerCAmelCase :Tuple = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowerCAmelCase :Any = min(len(a__ ) , data_args.max_eval_samples )
lowerCAmelCase :str = eval_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase :List[str] = eval_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCAmelCase :List[str] = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=a__ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(a__ ):
lowerCAmelCase , lowerCAmelCase :List[str] = eval_predictions
lowerCAmelCase :Tuple = np.argmax(a__ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
lowerCAmelCase :List[str] = Trainer(
model=a__ , args=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , compute_metrics=a__ , )
# Training
if training_args.do_train:
lowerCAmelCase :Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase :Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase :Union[str, Any] = last_checkpoint
lowerCAmelCase :Optional[int] = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase :Tuple = train_result.metrics
lowerCAmelCase :Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ )
)
lowerCAmelCase :List[Any] = min(a__ , len(a__ ) )
trainer.log_metrics('train' , a__ )
trainer.save_metrics('train' , a__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase :Dict = trainer.evaluate()
lowerCAmelCase :Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a__ )
lowerCAmelCase :Tuple = min(a__ , len(a__ ) )
trainer.log_metrics('eval' , a__ )
trainer.save_metrics('eval' , a__ )
lowerCAmelCase :List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
def UpperCAmelCase ( a__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 553 | 0 |
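# A hypothetical command line for the SWAG script above; the flags map onto the
# ModelArguments/DataTrainingArguments/TrainingArguments dataclasses it parses:
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --per_device_train_batch_size 16 \
#     --output_dir /tmp/swag_out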
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=1_3 ,lowerCamelCase_=7 ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=9_9 ,lowerCamelCase_=3_2 ,lowerCamelCase_=5 ,lowerCamelCase_=4 ,lowerCamelCase_=3_7 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=1_6 ,lowerCamelCase_=2 ,lowerCamelCase_=0.02 ,lowerCamelCase_=4 ,) -> Dict:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def UpperCamelCase__ ( self ) -> str:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_attention_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=lowerCamelCase_ ,)
return config, input_ids, attention_mask
def UpperCamelCase__ ( self ) -> str:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase__ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained("""distilbert-base-uncased""" )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
A = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
A = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0]
A = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,lowerCamelCase_ )
A = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,lowerCamelCase_ ,atol=1E-4 ) )
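# A minimal end-to-end sketch complementing the integration test above, using a real
# tokenizer instead of hand-written id arrays (same checkpoint, same Flax API):
from transformers import AutoTokenizer, FlaxDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
inputs = tokenizer("Hello, world!", return_tensors="np")   # Flax models consume numpy arrays
hidden = model(**inputs).last_hidden_state                 # shape (1, seq_len, 768)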
| 255 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = '''umt5'''
_lowerCamelCase = ['''past_key_values''']
def __init__( self ,lowerCamelCase_=2_5_0_1_1_2 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=6_4 ,lowerCamelCase_=1_0_2_4 ,lowerCamelCase_=8 ,lowerCamelCase_=None ,lowerCamelCase_=6 ,lowerCamelCase_=3_2 ,lowerCamelCase_=1_2_8 ,lowerCamelCase_=0.1 ,lowerCamelCase_=1E-6 ,lowerCamelCase_=1.0 ,lowerCamelCase_="gated-gelu" ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_="T5Tokenizer" ,lowerCamelCase_=True ,lowerCamelCase_=0 ,lowerCamelCase_=1 ,lowerCamelCase_=0 ,**lowerCamelCase_ ,) -> Dict:
super().__init__(
is_encoder_decoder=lowerCamelCase_ ,tokenizer_class=lowerCamelCase_ ,tie_word_embeddings=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,decoder_start_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def UpperCamelCase__ ( self ) -> Dict:
return self.d_model
@property
def UpperCamelCase__ ( self ) -> Any:
return self.num_heads
@property
def UpperCamelCase__ ( self ) -> int:
return self.num_layers
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCamelCase__ ( self ) -> int:
return 1_3
@property
def UpperCamelCase__ ( self ) -> float:
return 5E-4
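# A quick check of the activation-string parsing in __init__ above, assuming the public
# class is `UMT5Config` and that the obfuscated assignments set `dense_act_fn` and
# `is_gated_act`, as the upstream T5-family configs do:
from transformers import UMT5Config

cfg = UMT5Config(feed_forward_proj="gated-gelu")
assert cfg.is_gated_act and cfg.dense_act_fn == "gelu_new"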
| 255 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
__a ="""yolos"""
def __init__( self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=[512, 864] , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=100 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.1 , **lowerCamelCase , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowercase__ )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = num_detection_tokens
__a = use_mid_position_embeddings
__a = auxiliary_loss
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
__a =version.parse("1.11" )
@property
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
return 1e-4
@property
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
return 12 | 448 |
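# A short sketch instantiating the config above under its public name (assumed to be
# YolosConfig), keeping the rectangular image size it defaults to:
from transformers import YolosConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
assert config.num_detection_tokens == 100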
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = """facebook/bart-large-mnli"""
_A : str = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
_A : Any = """text_classifier"""
_A : Optional[int] = AutoTokenizer
_A : List[str] = AutoModelForSequenceClassification
_A : Tuple = ["""text""", ["""text"""]]
_A : Any = ["""text"""]
def __UpperCamelCase (self ):
super().setup()
snake_case_ : List[Any] = self.model.config
snake_case_ : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
snake_case_ : Dict = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = labels
return self.pre_processor(
[text] * len(lowercase__ ) , [f'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = outputs.logits
snake_case_ : Optional[Any] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
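# The tool above is zero-shot NLI classification; the same behaviour is available
# through the standard pipeline API. A hedged equivalent (the pipeline resolves the
# entailment column internally, which the tool's setup() does by hand):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("I love this movie", candidate_labels=["positive", "negative"])
print(result["labels"][0])   # most likely label, like the tool's decode()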
| 480 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ : Union[str, Any] = random.Random()
def UpperCamelCase (lowercase_: str , lowercase_: Tuple=1.0 , lowercase_: Optional[Any]=None , lowercase_: List[Any]=None ) -> Optional[Any]:
if rng is None:
A__ : Union[str, Any] = global_rng
A__ : Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=10 , A__=160 , A__=8 , A__=0.0 , A__=4000 , A__=False , A__=True , ):
A__ : Union[str, Any] = parent
A__ : Dict = batch_size
A__ : int = min_seq_length
A__ : Optional[Any] = max_seq_length
A__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : List[str] = padding_value
A__ : List[str] = sampling_rate
A__ : Optional[int] = return_attention_mask
A__ : Any = do_normalize
A__ : Dict = feature_size
A__ : Optional[Any] = chunk_length
A__ : Any = hop_length
def __A ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Tuple = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[str] = WhisperFeatureExtractor if is_speech_available() else None
def __A ( self ):
A__ : Dict = WhisperFeatureExtractionTester(self )
def __A ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Union[str, Any] = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(A__ )
A__ : str = feat_extract_first.to_dict()
A__ : Union[str, Any] = feat_extract_second.to_dict()
A__ : Optional[int] = feat_extract_first.mel_filters
A__ : List[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Union[str, Any] = os.path.join(A__ , """feat_extract.json""" )
feat_extract_first.to_json_file(A__ )
A__ : str = self.feature_extraction_class.from_json_file(A__ )
A__ : Dict = feat_extract_first.to_dict()
A__ : Tuple = feat_extract_second.to_dict()
A__ : Union[str, Any] = feat_extract_first.mel_filters
A__ : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : int = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test feature size
A__ : Union[str, Any] = feature_extractor(A__ , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
A__ : Optional[Any] = feature_extractor(A__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[Any] = np.asarray(A__ )
A__ : List[str] = feature_extractor(A__ , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test truncation required
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ : Tuple = [np.asarray(A__ ) for speech_input in speech_inputs]
A__ : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ : List[str] = [np.asarray(A__ ) for speech_input in speech_inputs_truncated]
A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
def __A ( self ):
import torch
A__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Optional[int] = np.random.rand(100 , 32 ).astype(np.float32 )
A__ : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : str = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
        self.assertTrue(np_processed.input_features.dtype == np.float32 )
A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
        self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __A ( self , A__ ):
A__ : Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : Optional[int] = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __A ( self ):
# fmt: off
A__ : Optional[int] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Optional[Any] = WhisperFeatureExtractor()
A__ : Union[str, Any] = feature_extractor(A__ , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) )
def __A ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Optional[Any] = self._load_datasamples(1 )[0]
A__ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
A__ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0]
self.assertTrue(np.all(np.mean(A__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1e-3 ) )
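# A compact standalone version of what the tests above exercise: one second of silence
# in, a fixed (1, 80, 3000) log-mel feature matrix out (Whisper always pads/trims to 30 s):
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)   # 1 s of silence at 16 kHz
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
assert features.shape == (1, 80, 3000)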
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
        A__ : Tuple = GPT2Config(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPT2LMHeadModel(A__ )
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A__ , A__ ):
        return torch.zeros(A__ , self.prefix_length , dtype=torch.int64 , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
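# The decoder above conditions GPT-2 on a projected CLIP prefix by concatenating
# embeddings and calling the LM with `inputs_embeds`. A minimal sketch of that core
# trick with a plain GPT-2; the 10-token random prefix stands in for the real
# projected CLIP features:
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
lm = GPT2LMHeadModel.from_pretrained("gpt2")

ids = tok("a photo of", return_tensors="pt").input_ids
text_embeds = lm.transformer.wte(ids)                      # token embeddings, (1, T, 768)
prefix = torch.randn(1, 10, lm.config.n_embd)              # stand-in prefix embeddings
out = lm(inputs_embeds=torch.cat([prefix, text_embeds], dim=1))
next_token = out.logits[:, -1].argmax(-1)                  # greedy next-token choice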
| 64 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : int , *__magic_name__ : Any , **__magic_name__ : Any ):
"""simple docstring"""
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
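# The shim above only warns and delegates; new code can use the image processor
# directly (same checkpoint id as in the YOLOS config map earlier):
from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")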
| 48 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
_a : List[Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
_a : Dict = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
_a : Any = BeautifulSoup(res.text, 'html.parser')
_a : Optional[Any] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 447 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
'''simple docstring'''
model.train()
lowercase = model(lowerCAmelCase__ )
lowercase = F.mse_loss(lowerCAmelCase__ ,target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase__ )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
'''simple docstring'''
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(lowerCAmelCase__ )
lowercase = RegressionDataset(length=80 )
lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase = AdamW(params=model.parameters() ,lr=1E-3 )
lowercase = AdamW(params=ddp_model.parameters() ,lr=1E-3 )
lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 )
lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 )
# Make a copy of `model`
if sched:
lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
else:
lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single process `no_sync` is a no-op: the "DDP" model's grads always match the base model's."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]

def test_distributed_sync(accelerator):
    """On multiple processes, grads should only be in sync on iterations where `no_sync` was not used."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync on iterations where `no_sync` was not used
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]

def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` should only sync grads every `gradient_accumulation_steps` steps (or on the last batch)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" under the wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Grads should only be in sync on sync steps (every other iteration, and on the final batch)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()

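# Minimal sketch of the pattern this test validates, as it would look in user code.
# The function and its argument names are placeholders for illustration, not part of the test suite:
def _example_accumulate_loop(accelerator, model, optimizer, dataloader):
    for batch in dataloader:
        with accelerator.accumulate(model):
            output = model(batch["x"])
            loss = F.mse_loss(output, batch["y"])
            accelerator.backward(loss)  # grads only sync on accumulation boundaries
            optimizer.step()
            optimizer.zero_grad()
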
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Optimizers and schedulers stepped under `accelerator.accumulate` should stay aligned with manual stepping."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()

def test_dataloader_break():
    """`GradientState` should track the active dataloader correctly, including nested iteration over a second one."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None

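# The state inspected above is also visible from user code, e.g. (illustrative sketch):
#
#   for batch in prepared_dataloader:
#       if accelerator.gradient_state.end_of_dataloader:
#           ...  # last batch: accumulate() is expected to force a grad sync here
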
def main():
    """Run the gradient synchronization/accumulation test suite appropriate for the current distributed setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)

def _mp_fn(index):
    # Entry point for spawned workers (e.g. TPU); the index argument is required by the spawner but unused here
    main()

if __name__ == "__main__":
main()
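# This script is meant to be run through the accelerate CLI, e.g. (adjust the process count to your setup):
#
#   accelerate launch --num_processes 2 <this_file>.py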
| 708 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
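# The lazy mapping resolves a config class to its extractor class on first access, e.g. (illustrative):
#
#   from transformers import ViTConfig
#   extractor_cls = FEATURE_EXTRACTOR_MAPPING[ViTConfig]  # -> ViTFeatureExtractor
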
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its name, searching the known mappings and the main `transformers` init."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None

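# Illustrative lookups (not executed at import time):
#
#   feature_extractor_class_from_name("ViTFeatureExtractor")  # -> transformers.ViTFeatureExtractor
#   feature_extractor_class_from_name("NotARealExtractor")    # -> None
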
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False,
                                 resume_download=False, proxies=None, use_auth_token=None, revision=None,
                                 local_files_only=False, **kwargs):
    """Load the feature extractor configuration dict from a local directory or a model repo on the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)

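# Illustrative usage (sketch; the repo name is an example — any model repo with a preprocessor_config.json works):
#
#   config_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#   config_dict.get("feature_extractor_type")  # e.g. "Wav2Vec2FeatureExtractor"
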
class AutoFeatureExtractor:
    """Generic factory that instantiates the right library feature extractor class via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate a feature extractor class from a pretrained model name or path, resolving the class automatically."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
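# Typical usage (sketch; `CustomConfig` and `CustomFeatureExtractor` are hypothetical user-defined classes):
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)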
| 72 | 0 |