| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
"""simple docstring"""
from math import pow
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ,):
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
UpperCamelCase = int(pow(_lowercase ,_lowercase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
UpperCamelCase , UpperCamelCase = backtrack(
_lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
UpperCamelCase , UpperCamelCase = backtrack(
_lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase )
return current_sum, solutions_count
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(_lowercase ,_lowercase ,1 ,0 ,0 )[1] # Return the solutions_count
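
# Example (hand-checked, not part of the original file): 100 has exactly three
# representations as a sum of distinct squares -- 10^2, 6^2 + 8^2, and
# 1^2 + 3^2 + 4^2 + 5^2 + 7^2 -- so:
#
#     solve(100, 2)  # -> 3
#     solve(10, 2)   # -> 1, since 10 = 1^2 + 3^2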

if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
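
# Typical usage (illustrative, not from the original file): project a model's
# unconstrained features to valid distribution parameters, then build the
# distribution:
#
#     dist_output = StudentTOutput(dim=1)
#     proj = dist_output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))
#     distribution = dist_output.distribution((df, loc, scale))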

class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))

| 34 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
SCREAMING_SNAKE_CASE_ = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string'''), '''prediction_text''': datasets.Value('''string''')},
'''references''': {
'''id''': datasets.Value('''string'''),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string'''),
'''answer_start''': datasets.Value('''int32'''),
}),
},
}) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
UpperCamelCase = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
UpperCamelCase = evaluate(dataset=lowerCamelCase_ , predictions=lowerCamelCase_)
return score | 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
if n_element < 1:
UpperCamelCase = ValueError('''a should be a positive number''' )
raise my_error
UpperCamelCase = [1]
UpperCamelCase , UpperCamelCase , UpperCamelCase = (0, 0, 0)
UpperCamelCase = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 ,hamming_list[j] * 3 ,hamming_list[k] * 5 ) )
index += 1
return hamming_list
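
# Example (hand-checked, not part of the original file): the series starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so:
#
#     hamming(5)   # -> [1, 2, 3, 4, 5]
#     hamming(10)  # -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]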

if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")

| 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , lowerCamelCase_) -> float:
return 0.0
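
# Any object with a matching `process` method satisfies this protocol. A
# minimal illustration (not part of the original file) -- a pass-through
# filter, whose impulse response yields a flat 0 dB magnitude response:
#
#     class Passthrough:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(Passthrough(), 48000)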

def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()

| 34 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
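
# Example (hand-checked, not part of the original file):
#
#     z_function("abracadabra")           # -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
#     find_pattern("abr", "abracadabra")  # -> 2 (matches at indices 0 and 7)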

if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 34 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = DebertaTokenizer
A_ = True
A_ = DebertaTokenizerFast
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase = {'''unk_token''': '''[UNK]'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowerCamelCase_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = '''lower newer'''
UpperCamelCase = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = '''lower newer'''
UpperCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tokenizer('''Hello''' , '''World''')
UpperCamelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
UpperCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_)
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
UpperCamelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
UpperCamelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_)
UpperCamelCase = [tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase = {
'''input_ids''': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , lowerCamelCase_)
for expected, decoded in zip(lowerCamelCase_ , lowerCamelCase_):
self.assertEqual(lowerCamelCase_ , lowerCamelCase_) | 34 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
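
# Typical call pattern (illustrative, not from the original file; in practice
# the model is instantiated under `init_empty_weights()` and the real weights
# are loaded afterwards onto a CUDA device):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     model = replace_with_bnb_linear(model, quantization_config=bnb_config)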

def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use "
        "`set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names

| 34 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''falcon'''
A_ = ['''past_key_values''']
def __init__( self , lowerCamelCase_=6_5_0_2_4 , lowerCamelCase_=4_5_4_4 , lowerCamelCase_=3_2 , lowerCamelCase_=7_1 , lowerCamelCase_=1e-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=1_1 , lowerCamelCase_=1_1 , **lowerCamelCase_ , ) -> Any:
UpperCamelCase = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase = kwargs.pop('''n_embed''' , lowerCamelCase_)
UpperCamelCase = hidden_size if n_embed is None else n_embed
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = use_cache
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCamelCase = alibi
UpperCamelCase = new_decoder_architecture
UpperCamelCase = multi_query # Ignored when new_decoder_architecture is True
UpperCamelCase = parallel_attn
UpperCamelCase = bias
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> int:
return self.hidden_size // self.num_attention_heads
@property
def UpperCAmelCase__ ( self) -> str:
return not self.alibi | 34 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 1000 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z) | 34 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for rt in rc.restypes:
UpperCamelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCamelCase = {name: i for i, name in enumerate(_lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCamelCase = torch.tensor(
_lowercase ,dtype=torch.intaa ,device=protein['''aatype'''].device ,)
UpperCamelCase = torch.tensor(
_lowercase ,dtype=torch.intaa ,device=protein['''aatype'''].device ,)
UpperCamelCase = torch.tensor(
_lowercase ,dtype=torch.floataa ,device=protein['''aatype'''].device ,)
UpperCamelCase = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCamelCase = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase = restype_atomaa_mask[protein_aatype]
UpperCamelCase = residx_atomaa_mask
UpperCamelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCamelCase = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCamelCase = torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCamelCase = rc.restype_atoa[restype_letter]
UpperCamelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCamelCase = rc.atom_order[atom_name]
UpperCamelCase = 1
UpperCamelCase = restype_atomaa_mask[protein_aatype]
UpperCamelCase = residx_atomaa_mask
return protein
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tree_map(lambda _lowercase : torch.tensor(_lowercase ,device=batch['''aatype'''].device ) ,_lowercase ,np.ndarray )
UpperCamelCase = tensor_tree_map(lambda _lowercase : np.array(_lowercase ) ,make_atomaa_masks(_lowercase ) )
return out | 34 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Tuple:
UpperCamelCase = parent
UpperCamelCase = 1_3
UpperCamelCase = 7
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = 9_9
UpperCamelCase = 3_8_4
UpperCamelCase = 2
UpperCamelCase = 4
UpperCamelCase = 3_7
UpperCamelCase = '''gelu'''
UpperCamelCase = 0.1
UpperCamelCase = 0.1
UpperCamelCase = 5_1_2
UpperCamelCase = 1_6
UpperCamelCase = 2
UpperCamelCase = 0.02
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 1_2_8
UpperCamelCase = 2
UpperCamelCase = 9
UpperCamelCase = 1
UpperCamelCase = None
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase = TFConvBertModel(config=lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase = [input_ids, input_mask]
UpperCamelCase = model(lowerCamelCase_)
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = TFConvBertForMaskedLM(config=lowerCamelCase_)
UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = self.num_labels
UpperCamelCase = TFConvBertForSequenceClassification(config=lowerCamelCase_)
UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = self.num_choices
UpperCamelCase = TFConvBertForMultipleChoice(config=lowerCamelCase_)
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase = self.num_labels
UpperCamelCase = TFConvBertForTokenClassification(config=lowerCamelCase_)
UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = TFConvBertForQuestionAnswering(config=lowerCamelCase_)
UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = TFConvBertModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7)
def UpperCAmelCase__ ( self) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
UpperCamelCase = True
if hasattr(lowerCamelCase_ , '''use_cache'''):
UpperCamelCase = True
UpperCamelCase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
UpperCamelCase = getattr(self.model_tester , '''key_length''' , lowerCamelCase_)
for model_class in self.all_model_classes:
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = len(model(lowerCamelCase_))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_)
UpperCamelCase = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''')
UpperCamelCase = tf.keras.models.load_model(lowerCamelCase_)
UpperCamelCase = model(lowerCamelCase_)
if self.is_encoder_decoder:
UpperCamelCase = outputs['''encoder_hidden_states''']
UpperCamelCase = outputs['''encoder_attentions''']
else:
UpperCamelCase = outputs['''hidden_states''']
UpperCamelCase = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowerCamelCase_) , lowerCamelCase_)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
self.assertIsNotNone(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
UpperCamelCase = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length)
UpperCamelCase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
UpperCamelCase = getattr(self.model_tester , '''key_length''' , lowerCamelCase_)
UpperCamelCase = getattr(self.model_tester , '''key_length''' , lowerCamelCase_)
def check_decoder_attentions_output(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
self.assertEqual(out_len % 2 , 0)
UpperCamelCase = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_):
UpperCamelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_))
UpperCamelCase = len(lowerCamelCase_)
self.assertEqual(config.output_hidden_states , lowerCamelCase_)
check_encoder_attentions_output(lowerCamelCase_)
if self.is_encoder_decoder:
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_))
self.assertEqual(config.output_hidden_states , lowerCamelCase_)
check_decoder_attentions_output(lowerCamelCase_)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_))
self.assertEqual(config.output_hidden_states , lowerCamelCase_)
check_encoder_attentions_output(lowerCamelCase_)
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_))
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_)
check_encoder_attentions_output(lowerCamelCase_)
@require_tf
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]])
UpperCamelCase = model(lowerCamelCase_)[0]
UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4) | 34 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
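# Example invocation (script name and output directory are illustrative):
#   python convert_vit_mae_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#     --pytorch_dump_folder_path ./vit-mae-base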
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
A_ = '''CIDAS/clipseg-rd64-refined'''
A_ = '''image_segmenter'''
A_ = CLIPSegForImageSegmentation
A_ = ['''image''', '''text''']
A_ = ['''image''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''vision'''])
super().__init__(*lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> str:
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase_ , return_tensors='''pt''')
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
with torch.no_grad():
UpperCamelCase = self.model(**lowerCamelCase_).logits
return logits
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = outputs.cpu().detach().numpy()
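# Threshold the logits into a binary mask (non-positive values -> 0, positive -> 1)
# before scaling to 8-bit and converting to a PIL image.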
UpperCamelCase = 0
UpperCamelCase = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta)) | 34 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
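# find_executable_batch_size treats this RuntimeError message as an OOM signal
# and retries the decorated function with the batch size halved.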
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
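# The decorator should halve the batch size on each simulated OOM:
# 128 -> 64 -> 32 -> 16 -> 8.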
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
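# Placeholder classes used when `sentencepiece` is not installed: each dummy
# mirrors a real class name and fails fast via requires_backends.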
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(self , ['''sentencepiece'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''sentencepiece''']) | 34 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
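# Second pass: repeat evaluation and prediction with a changed setting (the
# upstream test sets eval_accumulation_steps = 2 here) to verify ordering still holds.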
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.1'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(fn ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(tf_name ,hf_name )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
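# Example invocation (script name and paths are illustrative):
#   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path ./bigbird-pegasus-tf --save_dir ./bigbird-pegasus-pt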
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __snake_case ( _lowercase ,_lowercase = "cpu" ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = torch.load(_lowercase ,map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(v ,torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
UpperCamelCase = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase = src_path
torch.save(_lowercase ,_lowercase )
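# Example invocation via fire (script name illustrative):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin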
if __name__ == "__main__":
fire.Fire(convert) | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# total number of single-character occurrences (normalization constant for probabilities).
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alphabet character present in the text, accumulate p * log2(p).
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each two-character sequence, accumulate p * log2(p).
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
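# The difference H(pairs) - H(single) approximates the conditional entropy of
# the next character given the previous one.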
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = IFPipeline
A_ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase__ ( self) -> Optional[int]:
return self._get_dummy_components()
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Optional[int]:
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def UpperCAmelCase__ ( self) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def UpperCAmelCase__ ( self) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def UpperCAmelCase__ ( self) -> List[Any]:
self._test_save_load_local()
def UpperCAmelCase__ ( self) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> int:
# text-to-image: stage I pipeline plus its stage II super-resolution pipeline
UpperCamelCase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa)
UpperCamelCase = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
UpperCamelCase , UpperCamelCase = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase = None
UpperCamelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase = IFImgaImgPipeline(**pipe_a.components)
UpperCamelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase = IFInpaintingPipeline(**pipe_a.components)
UpperCamelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1)).to(lowerCamelCase_)
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def __snake_case ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats() | 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
import inspect
import unittest
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase__ ( self) -> Union[str, Any]:
import diffusers
from diffusers.dependency_versions_table import deps
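# Every backend named by a dummy placeholder class must exist in the dependency
# table, after mapping underscored module names to their pip package names.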
UpperCamelCase = inspect.getmembers(lowerCamelCase_ , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
UpperCamelCase = '''k-diffusion'''
elif backend == "invisible_watermark":
UpperCamelCase = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!' | 34 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class snake_case_ ( Pipeline ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda x: -x[0])
]
        return result
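# A hedged usage sketch for the pipeline class above, via the high-level factory.
# Assumes network access; the CLIP checkpoint and image URL are illustrative choices,
# not mandated by the code above.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(predictions[0])  # best match first: {"score": ..., "label": "two cats"}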
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( PreTrainedTokenizer ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
        UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=str)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
        return (out_vocab_file,)
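# A hedged sketch of the special-token layout built above: XLNet appends <sep> and
# <cls> at the END of the sequence (A + <sep> + <cls>, or A + <sep> + B + <sep> + <cls>
# for pairs), unlike BERT's leading [CLS]. Assumes the public checkpoint is reachable.
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tok.encode("Hello world")  # encode() routes through build_inputs_with_special_tokens
assert tok.convert_ids_to_tokens(ids)[-2:] == ["<sep>", "<cls>"]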
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCamelCase = Image.fromarray(np.uint8(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
        UpperCamelCase = Image.fromarray(np.uint8(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCamelCase_ , torch_dtype=torch.float16 , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
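# A hedged end-to-end sketch mirroring the slow tests above: run the SD2 inpainting
# checkpoint on the same init/mask images. Assumes a CUDA device, network access,
# and enough VRAM for fp16 weights; the output filename is arbitrary.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    generator=torch.manual_seed(0),
).images[0]
result.save("inpainted.png")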
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = AutoConfig.from_pretrained(_lowercase )
UpperCamelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowercase )
UpperCamelCase = checkpoints.load_tax_checkpoint(_lowercase )
UpperCamelCase = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
UpperCamelCase = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
UpperCamelCase = f'layers_{str(_lowercase )}'
# Self-Attention
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCamelCase = flax_model.params['''encoder''']['''block'''][str(_lowercase )]['''layer''']
UpperCamelCase = tax_attention_key
UpperCamelCase = tax_attention_out
UpperCamelCase = tax_attention_query
UpperCamelCase = tax_attention_value
UpperCamelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase = tax_mlp_wi_a
UpperCamelCase = tax_mlp_wi_a
else:
UpperCamelCase = tax_mlp_wi
UpperCamelCase = tax_mlp_wo
UpperCamelCase = tax_mlp_layer_norm
UpperCamelCase = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCamelCase = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCamelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCamelCase = f'layers_{str(_lowercase )}'
# Self-Attention
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCamelCase = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCamelCase = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCamelCase = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCamelCase = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCamelCase = flax_model.params['''decoder''']['''block'''][str(_lowercase )]['''layer''']
UpperCamelCase = tax_attention_key
UpperCamelCase = tax_attention_out
UpperCamelCase = tax_attention_query
UpperCamelCase = tax_attention_value
UpperCamelCase = tax_pre_attention_layer_norm
UpperCamelCase = tax_enc_dec_attention_key
UpperCamelCase = tax_enc_dec_attention_out
UpperCamelCase = tax_enc_dec_attention_query
UpperCamelCase = tax_enc_dec_attention_value
UpperCamelCase = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase = tax_mlp_wi_a
UpperCamelCase = tax_mlp_wi_a
else:
UpperCamelCase = tax_mlp_wi
UpperCamelCase = tax_mlp_wo
        UpperCamelCase = tax_mlp_layer_norm
UpperCamelCase = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    UpperCamelCase = tax_decoder_norm
# Only for layer 0:
UpperCamelCase = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase = tax_model['''target''']['''token_embedder''']['''embedding''']
    UpperCamelCase = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(_lowercase )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
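# A hedged invocation sketch for the converter above; the script filename and all
# paths are placeholders, while the three flags match the argparse definitions:
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-flax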
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
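# A hedged illustration of the env parser above (referenced as parse_flag_from_env
# where it is used below): found keys go through distutils.util.strtobool, so
# "yes"/"1"/"true" map to 1, and missing keys fall back to `default`. The
# DOC_EXAMPLE_* keys are made up for this sketch.
import os

os.environ["DOC_EXAMPLE_FLAG"] = "yes"
assert parse_flag_from_env("DOC_EXAMPLE_FLAG", default=False) == 1
assert parse_flag_from_env("DOC_EXAMPLE_UNSET", default=False) is False
del os.environ["DOC_EXAMPLE_FLAG"]  # keep the real flag parsing below unaffected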
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( Exception ):
"""simple docstring"""
pass
class snake_case_ ( Enum ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
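# A hedged usage sketch for the offline-simulation context manager above (named
# `offline` in the upstream test utilities; that name is an assumption here).
# CONNECTION_FAILS patches requests.Session.send to raise immediately, which is how
# the test suite exercises offline code paths.
import requests

with offline(OfflineSimulationMode.CONNECTION_FAILS):
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError:
        print("request blocked, as the simulation intends")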
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
    return port + uniq_delta
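# A hedged sketch of the two helpers above (the upstream names pytest_xdist_worker_id
# and get_torch_dist_unique_port are assumptions): each pytest-xdist worker "gwN"
# derives a unique torch.distributed master port, so parallel distributed tests
# never collide on the same port.
import os

os.environ["PYTEST_XDIST_WORKER"] = "gw3"  # simulate running inside worker 3
assert pytest_xdist_worker_id() == 3
assert get_torch_dist_unique_port() == 29500 + 3
del os.environ["PYTEST_XDIST_WORKER"]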
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=6 , lowerCamelCase_=1_7 , lowerCamelCase_=2_3 , lowerCamelCase_=1_1 , lowerCamelCase_=True , ) -> Dict:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = act_dim
UpperCamelCase = state_dim
UpperCamelCase = hidden_size
UpperCamelCase = max_length
UpperCamelCase = is_training
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
UpperCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0)
UpperCamelCase = random_attention_mask((self.batch_size, self.seq_length))
UpperCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase__ ( self) -> str:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Tuple:
UpperCamelCase = DecisionTransformerModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length *3 as there are 3 modalities: states, returns and actions
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class snake_case_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A_ = (DecisionTransformerModel,) if is_torch_available() else ()
A_ = ()
A_ = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
A_ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = DecisionTransformerModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7)
def UpperCAmelCase__ ( self) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = DecisionTransformerModel.from_pretrained(lowerCamelCase_)
self.assertIsNotNone(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_)
UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(lowerCamelCase_)] , lowerCamelCase_)
@require_torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase = 1_0 # defined by the RL environment, may be normalized
UpperCamelCase = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''')
UpperCamelCase = model.to(lowerCamelCase_)
UpperCamelCase = model.config
torch.manual_seed(0)
        UpperCamelCase = torch.randn(1 , 1 , config.state_dim).to(device=lowerCamelCase_ , dtype=torch.float32) # env.reset()
UpperCamelCase = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=lowerCamelCase_)
        UpperCamelCase = torch.tensor(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.float32).reshape(1 , 1 , 1)
UpperCamelCase = state
        UpperCamelCase = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase_ , dtype=torch.float32)
        UpperCamelCase = torch.zeros(1 , 0 , device=lowerCamelCase_ , dtype=torch.float32)
UpperCamelCase = torch.tensor(0 , device=lowerCamelCase_ , dtype=torch.long).reshape(1 , 1)
for step in range(lowerCamelCase_):
UpperCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase_)] , dim=1)
UpperCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase_)] , dim=1)
UpperCamelCase = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
with torch.no_grad():
UpperCamelCase , UpperCamelCase , UpperCamelCase = model(
states=lowerCamelCase_ , actions=lowerCamelCase_ , rewards=lowerCamelCase_ , returns_to_go=lowerCamelCase_ , timesteps=lowerCamelCase_ , attention_mask=lowerCamelCase_ , return_dict=lowerCamelCase_ , )
self.assertEqual(action_pred.shape , actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim).to(device=lowerCamelCase_ , dtype=torch.float32),
1.0,
False,
{},
)
UpperCamelCase = action_pred[0, -1]
UpperCamelCase = torch.cat([states, state] , dim=1)
UpperCamelCase = returns_to_go[0, -1] - reward
UpperCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
UpperCamelCase = torch.cat(
                [timesteps, torch.ones((1, 1) , device=lowerCamelCase_ , dtype=torch.long) * (step + 1)] , dim=1)
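# A hedged arithmetic sketch of the `seq_length * 3` bookkeeping asserted above:
# each environment step contributes three embedded tokens (return-to-go, state,
# action), which the model stacks and interleaves before the transformer. Zero
# tensors stand in for the per-modality embeddings.
import torch

batch, steps, hidden = 1, 5, 64
returns_emb = torch.zeros(batch, steps, hidden)
states_emb = torch.zeros(batch, steps, hidden)
actions_emb = torch.zeros(batch, steps, hidden)
stacked = torch.stack((returns_emb, states_emb, actions_emb), dim=1)  # (1, 3, 5, 64)
interleaved = stacked.permute(0, 2, 1, 3).reshape(batch, steps * 3, hidden)
assert interleaved.shape == (1, 15, 64)  # matches last_hidden_state above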
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
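    # Hedged extra checks for strand_sort above: the operator-based comparison also
    # sorts strings lexicographically, and an empty input returns an empty solution.
    assert strand_sort(["pear", "apple", "banana"]) == ["apple", "banana", "pear"]
    assert strand_sort([]) == []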
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self , lowerCamelCase_ = 1_0_0_0 , lowerCamelCase_ = None) -> Optional[int]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowerCamelCase_)
# standard deviation of the initial noise distribution
UpperCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
    # mainly at formulas (9), (12), (13) and Algorithm 2.
UpperCamelCase = 4
# running values
UpperCamelCase = []
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> str:
UpperCamelCase = num_inference_steps
UpperCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1)[:-1]
UpperCamelCase = torch.cat([steps, torch.tensor([0.0])])
if self.config.trained_betas is not None:
            UpperCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.float32)
else:
UpperCamelCase = torch.sin(steps * math.pi / 2) ** 2
UpperCamelCase = (1.0 - self.betas**2) ** 0.5
        UpperCamelCase = (torch.atan2(self.betas , self.alphas) / math.pi * 2)[:-1]
UpperCamelCase = timesteps.to(lowerCamelCase_)
UpperCamelCase = []
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
UpperCamelCase = (self.timesteps == timestep).nonzero().item()
UpperCamelCase = timestep_index + 1
UpperCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCamelCase_)
if len(self.ets) == 1:
UpperCamelCase = self.ets[-1]
elif len(self.ets) == 2:
UpperCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets) == 3:
UpperCamelCase = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
UpperCamelCase = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
UpperCamelCase = self._get_prev_sample(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_) -> torch.FloatTensor:
return sample
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase = self.alphas[timestep_index]
UpperCamelCase = self.betas[timestep_index]
UpperCamelCase = self.alphas[prev_timestep_index]
UpperCamelCase = self.betas[prev_timestep_index]
UpperCamelCase = (sample - sigma * ets) / max(lowerCamelCase_ , 1e-8)
UpperCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self) -> List[str]:
        return self.config.num_train_timesteps
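# A hedged smoke test for the scheduler above, which mirrors diffusers' IPNDMScheduler
# (the public class name is an assumption about what this code corresponds to):
# a few denoising steps on random tensors show that step() preserves the sample shape.
import torch
from diffusers import IPNDMScheduler

sched = IPNDMScheduler(num_train_timesteps=1000)
sched.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in sched.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a denoising model's prediction
    sample = sched.step(model_output, t, sample).prev_sample
assert sample.shape == (1, 3, 8, 8)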
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
SCREAMING_SNAKE_CASE_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
SCREAMING_SNAKE_CASE_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
SCREAMING_SNAKE_CASE_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = random.randint(0 ,len(_lowercase ) - 1 )
UpperCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
UpperCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
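# A hedged sketch of the single-point crossover intended above: the random slice
# index swaps tails between TWO parents (the duplicated parent_a/child_a names in
# the obfuscated code above stand in for two distinct parents upstream).
parent_1, parent_2 = "aaaa", "bbbb"
slice_at = 2  # random.randint(0, len(parent_1) - 1) in the real code
child_1 = parent_1[:slice_at] + parent_2[slice_at:]
child_2 = parent_2[:slice_at] + parent_1[slice_at:]
assert (child_1, child_2) == ("aabb", "bbaa")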
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = list(_lowercase )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
UpperCamelCase = random.choice(_lowercase )
return "".join(_lowercase )
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,):
"""simple docstring"""
UpperCamelCase = []
    # Generate more children in proportion to the fitness score.
UpperCamelCase = int(parent_a[1] * 100 ) + 1
UpperCamelCase = 10 if child_n >= 10 else child_n
for _ in range(_lowercase ):
UpperCamelCase = population_score[random.randint(0 ,_lowercase )][0]
UpperCamelCase , UpperCamelCase = crossover(parent_a[0] ,_lowercase )
# Append new string to the population list.
pop.append(mutate(_lowercase ,_lowercase ) )
pop.append(mutate(_lowercase ,_lowercase ) )
return pop
def __snake_case ( _lowercase ,_lowercase ,_lowercase = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
UpperCamelCase = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_lowercase )
    # Verify that the target contains no genes besides the ones inside the genes variable.
UpperCamelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
UpperCamelCase = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_lowercase )
# Generate random starting population.
UpperCamelCase = []
for _ in range(_lowercase ):
population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
    # Just some logs to know what the algorithm is doing.
UpperCamelCase , UpperCamelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
UpperCamelCase = [evaluate(_lowercase ,_lowercase ) for item in population]
        # Check if any candidate matches the target exactly.
UpperCamelCase = sorted(_lowercase ,key=lambda _lowercase : x[1] ,reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
UpperCamelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
UpperCamelCase = [
(item, score / len(_lowercase )) for item, score in population_score
]
# This is selection
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )] ,_lowercase ,_lowercase ) )
            # Check if the population has already reached the maximum size and, if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also solve small strings in
            # far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
SCREAMING_SNAKE_CASE_ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
) | 34 |
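# Worked illustration of the fitness measure used by `evaluate` above (a
# sketch with hypothetical values): a candidate scores one point for every
# position that already matches the target.
#
#     target = "abc"
#     score = len([g for pos, g in enumerate("abd") if g == target[pos]])
#     assert score == 2  # "a" and "b" match, "d" does not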
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
        # The parsed training script args should come back as a dict with values cast to their proper Python types.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
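# For reference, the casting behavior the test above asserts (a sketch of the
# intended output, with a shortened arg list; keys are the flag names without
# the leading dashes):
#
#     _convert_nargs_to_dict(["--epochs", "3", "--do_train", "False", "--max_steps", "50.5"])
#     # -> {"epochs": 3, "do_train": False, "max_steps": 50.5}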
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = ArgumentParser(
description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' ,type=_lowercase ,default=1 ,help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' ,type=_lowercase ,help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) ,)
# rest from the training program
parser.add_argument('''training_script_args''' ,nargs=_lowercase )
return parser.parse_args()
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = parse_args()
# Import training_script as a module.
UpperCamelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase = script_fpath.stem
UpperCamelCase = importlib.import_module(_lowercase )
# Patch sys.argv
UpperCamelCase = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main() | 34 |
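# Typical invocation of this launcher (a sketch; the launcher filename and the
# script path are illustrative):
#
#     python xla_spawn.py --num_cores 8 path/to/train.py --some_training_arg value
#
# The helper imports train.py as a module, appends `--tpu_num_cores 8` to its
# argv, and spawns `mod._mp_fn` on the requested number of TPU cores.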
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
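# Worked example of the CER formula from the docstring (a sketch): comparing
# prediction "abxd" against reference "abcd" gives S=1, D=0, I=0 over N=4
# reference characters, so CER = (1 + 0 + 0) / 4 = 0.25.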
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
SCREAMING_SNAKE_CASE_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
SCREAMING_SNAKE_CASE_ = 'UperNetConfig'
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0 , lowerCamelCase_ = False , lowerCamelCase_ = 1 , ) -> None:
super().__init__()
UpperCamelCase = nn.Convad(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , bias=lowerCamelCase_ , dilation=lowerCamelCase_ , )
UpperCamelCase = nn.BatchNormad(lowerCamelCase_)
UpperCamelCase = nn.ReLU()
def UpperCAmelCase__ ( self , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase = self.conv(lowerCamelCase_)
UpperCamelCase = self.batch_norm(lowerCamelCase_)
UpperCamelCase = self.activation(lowerCamelCase_)
return output
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
super().__init__()
UpperCamelCase = [
nn.AdaptiveAvgPoolad(lowerCamelCase_),
UperNetConvModule(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase = input
for layer in self.layers:
UpperCamelCase = layer(lowerCamelCase_)
return hidden_state
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
super().__init__()
UpperCamelCase = pool_scales
UpperCamelCase = align_corners
UpperCamelCase = in_channels
UpperCamelCase = channels
UpperCamelCase = []
for i, pool_scale in enumerate(lowerCamelCase_):
UpperCamelCase = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase_ , in_channels=lowerCamelCase_ , channels=lowerCamelCase_)
self.blocks.append(lowerCamelCase_)
self.add_module(str(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[torch.Tensor]:
UpperCamelCase = []
for ppm in self.blocks:
UpperCamelCase = ppm(lowerCamelCase_)
UpperCamelCase = nn.functional.interpolate(
lowerCamelCase_ , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners)
ppm_outs.append(lowerCamelCase_)
return ppm_outs
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> Any:
super().__init__()
UpperCamelCase = config
UpperCamelCase = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCamelCase = in_channels
UpperCamelCase = config.hidden_size
UpperCamelCase = False
UpperCamelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1)
# PSP Module
UpperCamelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCamelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCamelCase = nn.ModuleList()
UpperCamelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCamelCase = UperNetConvModule(lowerCamelCase_ , self.channels , kernel_size=1)
UpperCamelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1)
self.lateral_convs.append(lowerCamelCase_)
self.fpn_convs.append(lowerCamelCase_)
UpperCamelCase = UperNetConvModule(
len(self.in_channels) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
self.apply(self._init_weights)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if isinstance(lowerCamelCase_ , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
UpperCamelCase = inputs[-1]
UpperCamelCase = [x]
psp_outs.extend(self.psp_modules(lowerCamelCase_))
UpperCamelCase = torch.cat(lowerCamelCase_ , dim=1)
UpperCamelCase = self.bottleneck(lowerCamelCase_)
return output
def UpperCAmelCase__ ( self , lowerCamelCase_) -> torch.Tensor:
# build laterals
UpperCamelCase = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(lowerCamelCase_))
# build top-down path
UpperCamelCase = len(lowerCamelCase_)
for i in range(used_backbone_levels - 1 , 0 , -1):
UpperCamelCase = laterals[i - 1].shape[2:]
UpperCamelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=lowerCamelCase_ , mode='''bilinear''' , align_corners=self.align_corners)
# build outputs
UpperCamelCase = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1 , 0 , -1):
UpperCamelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners)
UpperCamelCase = torch.cat(lowerCamelCase_ , dim=1)
UpperCamelCase = self.fpn_bottleneck(lowerCamelCase_)
UpperCamelCase = self.classifier(lowerCamelCase_)
return output
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = 2 , lowerCamelCase_ = 3 , lowerCamelCase_ = 1) -> None:
super().__init__()
UpperCamelCase = config
UpperCamelCase = config.auxiliary_in_channels
UpperCamelCase = config.auxiliary_channels
UpperCamelCase = config.auxiliary_num_convs
UpperCamelCase = config.auxiliary_concat_input
UpperCamelCase = in_index
UpperCamelCase = (kernel_size // 2) * dilation
UpperCamelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , dilation=lowerCamelCase_))
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , dilation=lowerCamelCase_))
if self.num_convs == 0:
UpperCamelCase = nn.Identity()
else:
UpperCamelCase = nn.Sequential(*lowerCamelCase_)
if self.concat_input:
UpperCamelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase_ , padding=kernel_size // 2)
UpperCamelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1)
def UpperCAmelCase__ ( self) -> Optional[int]:
self.apply(self._init_weights)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
if isinstance(lowerCamelCase_ , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self , lowerCamelCase_) -> torch.Tensor:
# just take the relevant feature maps
UpperCamelCase = encoder_hidden_states[self.in_index]
UpperCamelCase = self.convs(lowerCamelCase_)
if self.concat_input:
UpperCamelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1))
UpperCamelCase = self.classifier(lowerCamelCase_)
return output
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = UperNetConfig
A_ = '''pixel_values'''
A_ = True
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self) -> List[Any]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False) -> str:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = value
SCREAMING_SNAKE_CASE_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , lowerCamelCase_ , )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__(lowerCamelCase_)
UpperCamelCase = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
UpperCamelCase = UperNetHead(lowerCamelCase_ , in_channels=self.backbone.channels)
UpperCamelCase = UperNetFCNHead(lowerCamelCase_) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length'''))
@replace_return_docstrings(output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC)
def UpperCAmelCase__ ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Union[tuple, SemanticSegmenterOutput]:
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase = self.backbone.forward_with_filtered_kwargs(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , output_attentions=lowerCamelCase_)
UpperCamelCase = outputs.feature_maps
UpperCamelCase = self.decode_head(lowerCamelCase_)
UpperCamelCase = nn.functional.interpolate(lowerCamelCase_ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=lowerCamelCase_)
UpperCamelCase = None
if self.auxiliary_head is not None:
UpperCamelCase = self.auxiliary_head(lowerCamelCase_)
UpperCamelCase = nn.functional.interpolate(
lowerCamelCase_ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=lowerCamelCase_)
UpperCamelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''')
else:
# compute weighted loss
UpperCamelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase = (logits,) + outputs[1:]
else:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 34 |
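# A minimal inference sketch for the model above (the public names
# `AutoImageProcessor` and `UperNetForSemanticSegmentation` are assumed from
# the transformers library; the checkpoint is the one listed at the top of
# this file, and `image` stands for any PIL image):
#
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (batch, num_labels, height, width)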
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE,
        # we need to build the string separately for added tokens and byte-level tokens;
        # cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
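# A short usage sketch (the public class name `XLNetTokenizer` is assumed; the
# checkpoint comes from the pretrained map above):
#
#     tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     enc = tok("Hello world")
#     # Note: <sep> and <cls> are appended at the END of the sequence and
#     # padding is applied on the LEFT (padding_side = "left" above).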
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = dataset
UpperCamelCase = process
UpperCamelCase = params
def __len__( self) -> List[str]:
return len(self.dataset)
def __getitem__( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = self.dataset[i]
UpperCamelCase = self.process(lowerCamelCase_ , **self.params)
return processed
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None) -> str:
UpperCamelCase = loader
UpperCamelCase = infer
UpperCamelCase = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating it altogether
UpperCamelCase = None
UpperCamelCase = loader_batch_size
# Internal bookkeeping
UpperCamelCase = None
UpperCamelCase = None
def __len__( self) -> Optional[int]:
return len(self.loader)
def __iter__( self) -> List[Any]:
UpperCamelCase = iter(self.loader)
return self
def UpperCAmelCase__ ( self) -> str:
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
UpperCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
# Convert ModelOutput to tuple first
UpperCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase_ , lowerCamelCase_):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
                    # This can happen for optional data that gets passed around
UpperCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCamelCase = self._loader_batch_data.__class__(lowerCamelCase_)
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self) -> str:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCamelCase = next(self.iterator)
UpperCamelCase = self.infer(lowerCamelCase_ , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase_ , torch.Tensor):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys())[0]
UpperCamelCase = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
UpperCamelCase = processed
UpperCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def __iter__( self) -> Dict:
UpperCamelCase = iter(self.loader)
UpperCamelCase = None
return self
def UpperCAmelCase__ ( self) -> Optional[int]:
if self.subiterator is None:
UpperCamelCase = self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
UpperCamelCase = next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item;
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
UpperCamelCase = self.infer(next(self.iterator) , **self.params)
UpperCamelCase = next(self.subiterator)
return processed
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __iter__( self) -> Union[str, Any]:
UpperCamelCase = iter(self.loader)
return self
def UpperCAmelCase__ ( self) -> Tuple:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here, in the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then passes the accumulated items on to the caller.
UpperCamelCase = False
UpperCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('''is_last''')
accumulator.append(lowerCamelCase_)
if is_last:
return accumulator
while not is_last:
UpperCamelCase = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase_ , torch.Tensor):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys())[0]
UpperCamelCase = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCamelCase = observed_batch_size
UpperCamelCase = processed
UpperCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('''is_last''')
accumulator.append(lowerCamelCase_)
if is_last:
return accumulator
else:
UpperCamelCase = processed
UpperCamelCase = item.pop('''is_last''')
accumulator.append(lowerCamelCase_)
return accumulator
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = dataset
UpperCamelCase = key
def __len__( self) -> str:
return len(self.dataset)
def __getitem__( self , lowerCamelCase_) -> Optional[Any]:
return self.dataset[i][self.key]
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = dataset
UpperCamelCase = keya
UpperCamelCase = keya
def __len__( self) -> Optional[Any]:
return len(self.dataset)
def __getitem__( self , lowerCamelCase_) -> str:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]} | 34 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
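# Greedy longest-match illustration for the WordpieceTokenizer above (a sketch
# with a hypothetical vocab; the method name `tokenize` is assumed from the
# public API):
#
#     wp = WordpieceTokenizer(vocab={"un", "unhappy", "happy"}, unk_token="<unk>")
#     wp.tokenize("unhappy")  # -> ["unhappy"]  (the longest prefix in the vocab wins)
#     wp.tokenize("xy")       # -> ["<unk>", "<unk>"]  (no prefix matches, char by char)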
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> List[Any]:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCamelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = True
A_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxRobertaModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCamelCase_)
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_) | 34 |
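# The slow test above loads PyTorch weights into Flax; the equivalent
# standalone call (a sketch) is:
#
#     model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#     outputs = model(np.ones((1, 1)))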
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
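# Sketch of how a distribution-output head is meant to be used (public names
# such as `StudentTOutput` and `get_parameter_projection` are assumed from the
# transformers time-series code; shapes are illustrative):
#
#     output = StudentTOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))      # domain-mapped parameters
#     distr = output.distribution((df, loc, scale))  # a StudentT distribution
#     sample = distr.sample()                        # shape (8,)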
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Dict:
super().__init__()
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')
self.register_modules(
speech_model=lowerCamelCase_ , speech_processor=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_ = "auto") -> str:
if slice_size == "auto":
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
self.enable_attention_slicing(lowerCamelCase_)
@torch.no_grad()
def __call__( self , lowerCamelCase_ , lowerCamelCase_=1_6_0_0_0 , lowerCamelCase_ = 5_1_2 , lowerCamelCase_ = 5_1_2 , lowerCamelCase_ = 5_0 , lowerCamelCase_ = 7.5 , lowerCamelCase_ = None , lowerCamelCase_ = 1 , lowerCamelCase_ = 0.0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = "pil" , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = 1 , **lowerCamelCase_ , ) -> Any:
UpperCamelCase = self.speech_processor.feature_extractor(
lowerCamelCase_ , return_tensors='''pt''' , sampling_rate=lowerCamelCase_).input_features.to(self.device)
UpperCamelCase = self.speech_model.generate(lowerCamelCase_ , max_length=4_8_0_0_0_0)
UpperCamelCase = self.speech_processor.tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , normalize=lowerCamelCase_)[
0
]
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = 1
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_)}')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCamelCase_)}.')
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}')
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase = text_embeddings.shape
UpperCamelCase = text_embeddings.repeat(1 , lowerCamelCase_ , 1)
UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase = 42
if negative_prompt is None:
UpperCamelCase = [''''''] * batch_size
elif type(lowerCamelCase_) is not type(lowerCamelCase_):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_)} !='
F' {type(lowerCamelCase_)}.')
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [negative_prompt]
elif batch_size != len(lowerCamelCase_):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_)}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''')
else:
UpperCamelCase = negative_prompt
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = uncond_embeddings.shape[1]
UpperCamelCase = uncond_embeddings.repeat(1 , lowerCamelCase_ , 1)
UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device='''cpu''' , dtype=lowerCamelCase_).to(
self.device)
else:
UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_)
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
UpperCamelCase = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
UpperCamelCase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase_)):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
UpperCamelCase = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_)
# predict the noise residual
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2)
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = 1 / 0.1_8215 * latents
UpperCamelCase = self.vae.decode(lowerCamelCase_).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(lowerCamelCase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase_ , nsfw_content_detected=lowerCamelCase_) | 34 |
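# Editor's sketch (illustrative): the classifier-free guidance update used inside
# the denoising loop above, isolated as a standalone helper. `apply_cfg` is an
# assumed name, not a library function.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # `noise_pred` stacks the unconditional and text-conditioned predictions
    # along the batch dimension, which is why chunk(2) separates them.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)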
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = data
# Initialize hash values
UpperCamelCase = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
UpperCamelCase = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
UpperCamelCase = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> bytes:
UpperCamelCase = B'''\x80''' + (B'''\x00''' * (6_3 - (len(lowerCamelCase_) + 8) % 6_4))
UpperCamelCase = struct.pack('''>Q''' , (len(lowerCamelCase_) * 8))
return data + padding + big_endian_integer
def UpperCAmelCase__ ( self) -> None:
# Convert into blocks of 64 bytes
UpperCamelCase = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data) , 6_4)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase = list(struct.unpack('''>16L''' , lowerCamelCase_))
            # extend with 48 zeroed words
words += [0] * 4_8
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.hashes
for index in range(0 , 6_4):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
UpperCamelCase = (
self.ror(words[index - 1_5] , 7)
^ self.ror(words[index - 1_5] , 1_8)
^ (words[index - 1_5] >> 3)
)
UpperCamelCase = (
self.ror(words[index - 2] , 1_7)
^ self.ror(words[index - 2] , 1_9)
^ (words[index - 2] >> 1_0)
)
UpperCamelCase = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0X1_0000_0000
# Compression
UpperCamelCase = self.ror(lowerCamelCase_ , 6) ^ self.ror(lowerCamelCase_ , 1_1) ^ self.ror(lowerCamelCase_ , 2_5)
UpperCamelCase = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
UpperCamelCase = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0000_0000
UpperCamelCase = self.ror(lowerCamelCase_ , 2) ^ self.ror(lowerCamelCase_ , 1_3) ^ self.ror(lowerCamelCase_ , 2_2)
UpperCamelCase = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase = (sa + maj) % 0X1_0000_0000
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = (
g,
f,
e,
((d + tempa) % 0X1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0X1_0000_0000),
)
UpperCamelCase = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase = [
((element + mutated_hash_values[index]) % 0X1_0000_0000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase = ''''''.join([hex(lowerCamelCase_)[2:].zfill(8) for value in self.hashes])
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> int:
return 0XFFFF_FFFF & (value << (3_2 - rotations)) | (value >> rotations)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> None:
import hashlib
UpperCamelCase = bytes('''Test String''' , '''utf-8''')
self.assertEqual(SHAaaa(lowerCamelCase_).hash , hashlib.shaaaa(lowerCamelCase_).hexdigest())
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''-s''' ,'''--string''' ,dest='''input_string''' ,default='''Hello World!! Welcome to Cryptography''' ,help='''Hash the string''' ,)
parser.add_argument(
'''-f''' ,'''--file''' ,dest='''input_file''' ,help='''Hash contents of a file''' )
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file ,'''rb''' ) as f:
UpperCamelCase = f.read()
else:
UpperCamelCase = bytes(_lowercase ,'''utf-8''' )
print(SHAaaa(_lowercase ).hash )
if __name__ == "__main__":
main() | 34 |
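# Editor's sketch (illustrative): the 32-bit right-rotate primitive the SHA-256
# class above relies on, plus a hashlib cross-check. `rotr32` is an assumed name.
import hashlib

def rotr32(value: int, rotations: int) -> int:
    # Rotate a 32-bit word right, masking the result back to 32 bits.
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

assert rotr32(0x00000001, 1) == 0x80000000
# Reference digest useful for sanity-checking any SHA-256 implementation:
print(hashlib.sha256(b"Test String").hexdigest())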
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
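# Editor's sketch (illustrative): the generic "deprecated alias" pattern the class
# above follows. `OldProcessor`/`NewProcessor` are placeholder names, not real
# library classes.
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # Warn once at construction time, then behave exactly like the new class.
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)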
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 |
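# Editor's sketch (illustrative, assumes PyTorch): the postprocess step above
# reduced to plain tensors — softmax over per-label logits, then a ranked list.
# `rank_labels` is an assumed name.
import torch

def rank_labels(logits: torch.Tensor, candidate_labels: list) -> list:
    scores = logits.softmax(dim=-1).tolist()
    return [
        {"score": score, "label": label}
        for score, label in sorted(zip(scores, candidate_labels), key=lambda pair: -pair[0])
    ]

print(rank_labels(torch.tensor([2.0, 0.5, -1.0]), ["cat", "dog", "car"]))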
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
        # if the new index's result extends the interval further to the right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
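# Editor's sketch (illustrative): a de-obfuscated Z-function and occurrence count
# mirroring the snippet above, written for readability.
def z_function_sketch(s: str) -> list:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

def count_occurrences(pattern: str, text: str) -> int:
    # A Z-value >= len(pattern) marks an index where the pattern begins.
    return sum(v >= len(pattern) for v in z_function_sketch(pattern + text))

assert count_occurrences("ab", "ababab") == 3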
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 |
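# Editor's sketch (illustrative): the invariant the script above verifies —
# distributed evaluation must return every sample exactly once, in dataset order.
def predictions_are_sequential(predictions: list, length: int) -> bool:
    return predictions == list(range(length))

assert predictions_are_sequential([0, 1, 2], 3)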
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
    UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 1 |
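# Editor's sketch (illustrative, assumes PyTorch): the recursive "swap nn.Linear
# for a drop-in replacement" pattern implemented above, stripped of bitsandbytes
# specifics. `replace_linear` and `factory` are assumed names.
import torch.nn as nn

def replace_linear(model: nn.Module, factory, skip: set, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_layer = factory(child.in_features, child.out_features,
                                bias=child.bias is not None)
            new_layer.requires_grad_(False)  # replacements stay frozen
            setattr(model, name, new_layer)
        else:
            replace_linear(child, factory, skip, full_name)
    return model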
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
def __snake_case ( _lowercase ,_lowercase ,_lowercase = None ,):
"""simple docstring"""
UpperCamelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
UpperCamelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
UpperCamelCase = format_type
def __snake_case ( _lowercase ,_lowercase ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
UpperCamelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
SCREAMING_SNAKE_CASE_ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
SCREAMING_SNAKE_CASE_ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
SCREAMING_SNAKE_CASE_ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def __snake_case ( _lowercase ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __snake_case ( _lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = get_format_type_from_alias(_lowercase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_lowercase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' ) | 34 |
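# Editor's sketch (illustrative): the registry-with-aliases pattern used above,
# reduced to its essentials. All names here are assumptions.
_REGISTRY = {}
_ALIASES = {}

def register(name, obj, aliases=()):
    _REGISTRY[name] = obj
    for alias in set(aliases) | {name}:
        _ALIASES[alias] = name

def resolve(name_or_alias):
    name = _ALIASES.get(name_or_alias, name_or_alias)
    if name not in _REGISTRY:
        raise ValueError(f"Unknown format type: {name_or_alias!r}")
    return _REGISTRY[name]

register("numpy", object(), aliases=["np"])
assert resolve("np") is resolve("numpy")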
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
    'Number of comparisons for 100 elements selected from a standard normal distribution '
    'is:'
)
print(z) | 34 | 1 |
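# Editor's sketch (illustrative): the randomized in-place quicksort above,
# de-obfuscated and with the same comparison counter.
from random import randint

def quick_sort_sketch(a, start, end):
    count = 0
    if start < end:
        pivot_index, partition_count = partition_sketch(a, start, end)
        count += partition_count
        count += quick_sort_sketch(a, start, pivot_index - 1)
        count += quick_sort_sketch(a, pivot_index + 1, end)
    return count

def partition_sketch(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    boundary = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # compare against the pivot value
            boundary += 1
            a[boundary], a[index] = a[index], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    return boundary + 1, count

data = [3, 1, 2]
quick_sort_sketch(data, 0, len(data) - 1)
assert data == [1, 2, 3]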
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''data2vec-text'''
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> List[str]:
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
]) | 34 |
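# Editor's sketch (illustrative): the dynamic-axes mapping the ONNX `inputs`
# property above returns, expressed without any transformers dependency.
from collections import OrderedDict

def onnx_input_axes(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])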
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
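# Editor's sketch (illustrative): the lazy-import idea behind `_LazyModule`,
# expressed with the module-level __getattr__ hook from PEP 562. Assumes the
# file lives inside a package; the structure dict is a toy example.
import importlib

_IMPORT_STRUCTURE_SKETCH = {"configuration_biogpt": ["BioGptConfig"]}

def __getattr__(name):
    for module_name, symbols in _IMPORT_STRUCTURE_SKETCH.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")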
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
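# Editor's sketch (illustrative, assumes PyTorch): the fused-QKV split performed
# by the state-dict conversion above. One (3*dim, dim) weight becomes separate
# query/key/value tensors. `split_qkv` is an assumed name.
import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int):
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value

q, k, v = split_qkv(torch.randn(3 * 4, 4), dim=4)
assert q.shape == k.shape == v.shape == (4, 4)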
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
requires_backends(_lowercase ,['''torch'''] )
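# Editor's sketch (illustrative): a simplified version of the dummy-object
# pattern instantiated above — placeholders that fail loudly at use time when a
# backend is missing. The real implementation routes through requires_backends.
class DummyObjectSketch(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the 'torch' backend.")

class FakePipeline(metaclass=DummyObjectSketch):
    def __init__(self, *args, **kwargs):
        raise ImportError("FakePipeline requires the 'torch' backend.")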
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> int:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Any:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Union[str, Any]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> str:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
requires_backends(cls , ['''torch''']) | 34 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class snake_case_ :
"""simple docstring"""
A_ = None
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
UpperCamelCase = json.loads(feat_extract.to_json_string())
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(lowerCamelCase_ , '''feat_extract.json''')
feat_extract_first.to_json_file(lowerCamelCase_)
UpperCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = feat_extract_first.save_pretrained(lowerCamelCase_)[0]
check_json_file_has_correct_format(lowerCamelCase_)
UpperCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.feature_extraction_class()
self.assertIsNotNone(lowerCamelCase_) | 34 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
UpperCamelCase = len(lowerCamelCase_) - 1
def UpperCAmelCase__ ( self , lowerCamelCase_) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCamelCase = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree , lowerCamelCase_) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCamelCase_) , 5) == 1
return output_values
def UpperCAmelCase__ ( self , lowerCamelCase_) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCamelCase = self.basis_function(lowerCamelCase_)
UpperCamelCase = 0.0
UpperCamelCase = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase__ ( self , lowerCamelCase_ = 0.01) -> List[Any]:
from matplotlib import pyplot as plt # type: ignore
UpperCamelCase = [] # x coordinates of points to plot
UpperCamelCase = [] # y coordinates of points to plot
UpperCamelCase = 0.0
while t <= 1:
UpperCamelCase = self.bezier_curve_function(lowerCamelCase_)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
UpperCamelCase = [i[0] for i in self.list_of_points]
UpperCamelCase = [i[1] for i in self.list_of_points]
plt.plot(
lowerCamelCase_ , lowerCamelCase_ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
plt.scatter(lowerCamelCase_ , lowerCamelCase_ , color='''red''' , label='''Control Points''')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''decision_transformer'''
A_ = ['''past_key_values''']
A_ = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowerCamelCase_=1_7 , lowerCamelCase_=4 , lowerCamelCase_=1_2_8 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_=True , lowerCamelCase_=1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=3 , lowerCamelCase_=1 , lowerCamelCase_=None , lowerCamelCase_="relu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1e-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=5_0_2_5_6 , lowerCamelCase_=5_0_2_5_6 , lowerCamelCase_=False , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Any:
UpperCamelCase = state_dim
UpperCamelCase = act_dim
UpperCamelCase = hidden_size
UpperCamelCase = max_ep_len
UpperCamelCase = action_tanh
UpperCamelCase = vocab_size
UpperCamelCase = n_positions
UpperCamelCase = n_layer
UpperCamelCase = n_head
UpperCamelCase = n_inner
UpperCamelCase = activation_function
UpperCamelCase = resid_pdrop
UpperCamelCase = embd_pdrop
UpperCamelCase = attn_pdrop
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = scale_attn_weights
UpperCamelCase = use_cache
UpperCamelCase = scale_attn_by_inverse_layer_idx
UpperCamelCase = reorder_and_upcast_attn
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) | 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
SCREAMING_SNAKE_CASE_ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
SCREAMING_SNAKE_CASE_ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
SCREAMING_SNAKE_CASE_ = 'zero2'
SCREAMING_SNAKE_CASE_ = 'zero3'
SCREAMING_SNAKE_CASE_ = [ZEROa, ZEROa]
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = parameterized.to_safe_name('''_'''.join(str(_lowercase ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
SCREAMING_SNAKE_CASE_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> str:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Dict:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1_0 , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Union[str, Any]:
UpperCamelCase = models[model]
UpperCamelCase = self.run_trainer(
stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
self.do_checks(lowerCamelCase_)
return output_dir
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1_0 , lowerCamelCase_ = 1 , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Dict:
UpperCamelCase = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowerCamelCase_)
UpperCamelCase = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase_)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCamelCase = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
UpperCamelCase = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
UpperCamelCase = self.get_launcher(lowerCamelCase_)
UpperCamelCase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
return output_dir
def UpperCAmelCase__ ( self , lowerCamelCase_=False) -> List[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
UpperCamelCase = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split() | 34 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = FunnelTokenizer
A_ = FunnelTokenizerFast
A_ = True
A_ = True
def UpperCAmelCase__ ( self) -> Any:
super().setUp()
UpperCamelCase = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> List[str]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
UpperCamelCase = '''UNwant\u00E9d,running'''
UpperCamelCase = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.tokenizer_class(self.vocab_file)
UpperCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''')
self.assertListEqual(lowerCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , [7, 4, 5, 1_0, 8, 9])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase_)
for tokenizer in tokenizers:
UpperCamelCase = tokenizer('''UNwant\u00E9d,running''')
UpperCamelCase = len(inputs['''input_ids''']) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len)
UpperCamelCase = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''')
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len) | 34 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
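# `enable_full_determinism()` trades speed for bit-reproducible outputs, which is what lets the
# tests below pin exact image slices. A minimal sketch of the switches such a helper typically
# flips (an assumption about the general technique, not the exact diffusers implementation):
def _demo_full_determinism(seed: int = 0) -> None:
    import os
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.use_deterministic_algorithms(True)  # error out on nondeterministic CUDA kernels
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # autotuned kernels can vary from run to run
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # needed for deterministic cuBLAS GEMMs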
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A_ = frozenset([]) # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
        UpperCamelCase = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
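# Hedged usage sketch (not from the original test file): a minimal end-to-end inpainting
# call mirroring the integration tests above; the local image paths are illustrative
# assumptions.
#
#   import torch
#   from diffusers import StableDiffusionInpaintPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   ).to("cuda")
#   init = load_image("init_image.png")
#   mask = load_image("mask.png")   # white pixels are repainted, black pixels are kept
#   out = pipe(prompt="Face of a yellow cat", image=init, mask_image=mask).images[0]
#   out.save("inpainted.png")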
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker, or 0 when xdist is not used."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a master port offset by the xdist worker id so concurrent tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
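# Hedged usage sketches (not part of the original utilities; the script path below is
# an illustrative assumption):
#
#   def test_distributed_script():
#       cmd = [sys.executable, "tests/my_dist_script.py", f"--master_port={get_torch_dist_unique_port()}"]
#       result = execute_subprocess_async(cmd, env=os.environ.copy())
#       assert result.returncode == 0
#
#   def test_fails_without_network():
#       with offline(OfflineSimulationMode.CONNECTION_FAILS):
#           with pytest.raises(requests.ConnectionError):
#               requests.get("https://huggingface.co")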
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = data
def __iter__( self) -> Tuple:
for element in self.data:
yield element
def __snake_case ( _lowercase=True ):
"""simple docstring"""
UpperCamelCase = Accelerator(even_batches=_lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase = False ):
"""simple docstring"""
if iterable:
UpperCamelCase = DummyIterableDataset(torch.as_tensor(range(_lowercase ) ) )
else:
UpperCamelCase = TensorDataset(torch.as_tensor(range(_lowercase ) ) )
UpperCamelCase = DataLoader(_lowercase ,batch_size=_lowercase )
UpperCamelCase = accelerator.prepare(_lowercase )
return dl
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ,):
"""simple docstring"""
UpperCamelCase = create_dataloader(accelerator=_lowercase ,dataset_size=_lowercase ,batch_size=_lowercase )
UpperCamelCase = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_lowercase ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1, 1] ,)
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_lowercase ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 2] ,)
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = create_accelerator(even_batches=_lowercase )
verify_dataloader_batch_sizes(
_lowercase ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1] ,)
verify_dataloader_batch_sizes(
_lowercase ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 1] ,)
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = create_accelerator(even_batches=_lowercase )
UpperCamelCase = torch.nn.Linear(1 ,1 )
UpperCamelCase = accelerator.prepare(_lowercase )
UpperCamelCase = create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 )
UpperCamelCase = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_lowercase ):
UpperCamelCase = ddp_model(batch[0].float() )
UpperCamelCase = output.sum()
loss.backward()
batch_idxs.append(_lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def __snake_case ( _lowercase ):
"""simple docstring"""
with warnings.catch_warnings(record=_lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category ,_lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = create_accelerator(even_batches=_lowercase )
UpperCamelCase = torch.nn.Linear(1 ,1 )
UpperCamelCase = accelerator.prepare(_lowercase )
UpperCamelCase = create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 )
UpperCamelCase = create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_lowercase ):
UpperCamelCase = train_dl.batch_sampler.even_batches
UpperCamelCase = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = create_accelerator(even_batches=_lowercase )
UpperCamelCase = torch.nn.Linear(1 ,1 )
UpperCamelCase = accelerator.prepare(_lowercase )
create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 ,iterable=_lowercase )
UpperCamelCase = create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_lowercase ):
UpperCamelCase = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = create_accelerator()
UpperCamelCase = torch.nn.Linear(1 ,1 )
UpperCamelCase = accelerator.prepare(_lowercase )
create_dataloader(_lowercase ,dataset_size=3 ,batch_size=1 ,iterable=_lowercase )
with warnings.catch_warnings(record=_lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_lowercase ):
pass
assert issubclass(w[-1].category ,_lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
UpperCamelCase = accelerator.state.distributed_type
UpperCamelCase = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_lowercase )
UpperCamelCase = original_state
if __name__ == "__main__":
main() | 34 |
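# Hedged usage note (an assumption, not part of the original script): these checks are
# meant to run under exactly two processes, e.g.
#
#   accelerate launch --num_processes 2 test_distributed_data_loop.py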
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
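# Hedged illustration (not part of the original file): the first strand pulled from
# [4, 3, 5, 1, 2] is [4, 5]; the remaining [3, 1, 2] is handled by the recursive call
# and merged into the running solution.
#
#   print(strand_sort([4, 3, 5, 1, 2]))   # [1, 2, 3, 4, 5]
#   print(strand_sort(["b", "a", "c"]))   # ['a', 'b', 'c'] -- any comparable items work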
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''image_processor''', '''tokenizer''']
A_ = '''AutoImageProcessor'''
A_ = '''AutoTokenizer'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
super().__init__(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = self.image_processor
def __call__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_) -> str:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''')
if text is not None:
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_)
if images is not None:
UpperCamelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_)
if text is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_) , tensor_type=lowerCamelCase_)
def UpperCAmelCase__ ( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , *lowerCamelCase_ , **lowerCamelCase_) -> List[Any]:
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return ["input_ids", "attention_mask", "pixel_values"] | 34 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (3_2, 3_2)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowerCamelCase_)
return image
@property
def UpperCAmelCase__ ( self) -> Optional[int]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ ( self) -> Tuple:
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self) -> Dict:
torch.manual_seed(0)
UpperCamelCase = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
def extract(*lowerCamelCase_ , **lowerCamelCase_):
class snake_case_ :
"""simple docstring"""
def __init__( self) -> Tuple:
UpperCamelCase = torch.ones([0])
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
self.pixel_values.to(lowerCamelCase_)
return self
return Out()
return extract
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
UpperCamelCase = 7_7
UpperCamelCase = self.dummy_image.to(lowerCamelCase_)
UpperCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase_)
UpperCamelCase = alt_pipe.to(lowerCamelCase_)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = '''A painting of a squirrel eating a burger'''
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(0)
UpperCamelCase = alt_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowerCamelCase_ , )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(0)
UpperCamelCase = alt_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
UpperCamelCase = 7_7
UpperCamelCase = self.dummy_image.to(lowerCamelCase_)
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase_)
UpperCamelCase = alt_pipe.to(lowerCamelCase_)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = '''A painting of a squirrel eating a burger'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = alt_pipe(
[prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , image=lowerCamelCase_ , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase = init_image.resize((7_6_0, 5_0_4))
UpperCamelCase = '''BAAI/AltDiffusion'''
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''A fantasy landscape, trending on artstation'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
UpperCamelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
UpperCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
UpperCamelCase = init_image.resize((7_6_8, 5_1_2))
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
UpperCamelCase = '''BAAI/AltDiffusion'''
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''A fantasy landscape, trending on artstation'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1e-2 | 34 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
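# Hedged illustration (not from the original test): the dict `_convert_nargs_to_dict`
# is expected to produce for the success case above, with string values coerced to
# their Python types.
#
#   _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
#   # -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#   #     "learning_rate": 5e-05, "max_steps": 50.5}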
"""simple docstring"""
import math
import qiskit
def __snake_case ( _lowercase = 1 ,_lowercase = 1 ,_lowercase = 1 ):
"""simple docstring"""
if (
isinstance(_lowercase ,_lowercase )
or isinstance(_lowercase ,_lowercase )
or isinstance(_lowercase ,_lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCamelCase = qiskit.QuantumRegister(4 ,'''qr''' )
UpperCamelCase = qiskit.ClassicalRegister(2 ,'''cr''' )
# list the entries
UpperCamelCase = [input_a, input_a, carry_in]
UpperCamelCase = qiskit.QuantumCircuit(_lowercase ,_lowercase )
for i in range(0 ,3 ):
if entry[i] == 2:
quantum_circuit.h(_lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 ,1 ,3 ) # ccx = toffoli gate
quantum_circuit.cx(0 ,1 )
quantum_circuit.ccx(1 ,2 ,3 )
quantum_circuit.cx(1 ,2 )
quantum_circuit.cx(0 ,1 )
quantum_circuit.measure([2, 3] ,_lowercase ) # measure the last two qbits
UpperCamelCase = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCamelCase = qiskit.execute(_lowercase ,_lowercase ,shots=1000 )
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}') | 34 |
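# Hedged companion sketch (not in the original file): the classical truth table the
# circuit above reproduces when every input is a definite 0 or 1 (no Hadamard case).
#
#   def classical_full_adder(a: int, b: int, c_in: int) -> tuple:
#       total = a + b + c_in
#       return total % 2, total // 2  # (sum bit, carry-out bit)
#
#   assert classical_full_adder(1, 1, 1) == (1, 1)  # matches the all-ones case printed above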
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total | 34 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''], num_layers=predefined_args['''num_layers'''], units=predefined_args['''units'''], hidden_size=predefined_args['''hidden_size'''], max_length=predefined_args['''max_length'''], num_heads=predefined_args['''num_heads'''], scaled=predefined_args['''scaled'''], dropout=predefined_args['''dropout'''], output_attention=False, output_all_encodings=False, use_residual=predefined_args['''use_residual'''], activation=predefined_args.get('''activation''', '''gelu'''), layer_norm_eps=predefined_args.get('''layer_norm_eps''', None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), '''models''')
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args['''units'''], embed_size=predefined_args['''embed_size'''], embed_dropout=predefined_args['''embed_dropout'''], word_embed=predefined_args['''word_embed'''], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['''token_type_vocab_size'''], use_classifier=False, use_decoder=False, )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,'''encoder.layer_norm.beta''' )
UpperCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
UpperCamelCase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
UpperCamelCase = hf_bort_model.bert.encoder.layer[i]
# self attention
UpperCamelCase = layer.attention.self
UpperCamelCase = check_and_map_params(
self_attn.key.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
UpperCamelCase = check_and_map_params(
self_attn.key.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
UpperCamelCase = check_and_map_params(
self_attn.query.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
UpperCamelCase = check_and_map_params(
self_attn.query.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
UpperCamelCase = check_and_map_params(
self_attn.value.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
UpperCamelCase = check_and_map_params(
self_attn.value.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
UpperCamelCase = layer.attention.output
UpperCamelCase = check_and_map_params(
self_output.dense.bias ,f'encoder.transformer_cells.{i}.proj.bias' )
UpperCamelCase = check_and_map_params(
self_output.dense.weight ,f'encoder.transformer_cells.{i}.proj.weight' )
UpperCamelCase = check_and_map_params(
self_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.layer_norm.beta' )
UpperCamelCase = check_and_map_params(
self_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
UpperCamelCase = layer.intermediate
UpperCamelCase = check_and_map_params(
intermediate.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
UpperCamelCase = check_and_map_params(
intermediate.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
UpperCamelCase = layer.output
UpperCamelCase = check_and_map_params(
bert_output.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
UpperCamelCase = check_and_map_params(
bert_output.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
UpperCamelCase = check_and_map_params(
bert_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
UpperCamelCase = check_and_map_params(
bert_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='''pt''')
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print('''✔️ Both models output the same tensors''')
    else:
        print('''❌ The models do **NOT** output the same tensors''')
        print('''Absolute difference is:''', max_absolute_diff)
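# Example invocation (paths are placeholders; the script filename is assumed):
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output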
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 34 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Any:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
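        # Note: SentencePiece may emit pieces like "▁2017,"; the re-split above yields
        # ["▁2017", ","] so a trailing comma after digits becomes its own token
        # (exact pieces depend on the loaded SentencePiece model; example illustrative).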
    def _convert_token_to_id(self, token) -> int:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index) -> str:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens) -> str:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs, ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 34 | 1 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
return index
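# Quick sanity check (verifiable by hand): fibonacci(12) == 144 is the first
# Fibonacci number with three digits, so fibonacci_digits_index(3) == 12.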
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 34 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('''\n''')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """simple docstring"""
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=2_0_0) -> None:
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedily look for the longest vocabulary entry starting at `start`.
            while start < end:
                substr = ''''''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
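    # Illustrative behaviour (hypothetical vocab, not from this repo): with
    # vocab {"foo": 0, "bar": 1}, tokenize("foobar") matches the longest known
    # substring at each position and returns ["foo", "bar"]; unknown spans fall
    # back to unk_token one character at a time.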
class CpmAntTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ) -> None:
        requires_backends(self, ['''jieba'''])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]
    @property
    def newline_id(self):
        return self.encoder["\n"]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token) -> bool:
        return token in self.encoder
    def convert_tokens_to_string(self, tokens) -> str:
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['''</_>'''] = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['''</n>'''] = self.encoder['''\n''']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, '''w''', encoding='''utf-8''') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''')
                    index = token_index
                writer.write(token + '''\n''')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0)) | 34 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''')[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''')[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, '''rb''') as f:
        ckp = pkl.load(f)['''model''']
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
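# The loader above mirrors Detectron-style checkpoints: the pickled "model" dict of
# numpy arrays is converted entry-by-entry into torch tensors, preserving key order.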
class Config:
    """simple docstring"""
    _pointer = {}
    def __init__(self, dictionary, name="root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
def __repr__( self) -> List[Any]:
return str(list((self._pointer.keys())))
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split('''.''')[-1]] = val
        levels = key.split('''.''')
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), '''.'''.join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
def UpperCAmelCase__ ( self) -> Any:
return self._pointer
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
with open(F'{file_name}' , '''w''') as stream:
dump(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> str:
with open(F'{file_name}' , '''w''') as stream:
json.dump(lowerCamelCase_ , lowerCamelCase_)
    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
def __str__( self) -> List[Any]:
UpperCamelCase = ''' '''
if self._name != "root":
UpperCamelCase = F'{t * (self._level-1)}{self._name}:\n'
else:
UpperCamelCase = ''''''
UpperCamelCase = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(lowerCamelCase_ , lowerCamelCase_):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(lowerCamelCase_).__name__})\n'
UpperCamelCase = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
UpperCamelCase , UpperCamelCase = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_)
return cls(lowerCamelCase_)
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , **lowerCamelCase_) -> Dict:
UpperCamelCase = kwargs.pop('''cache_dir''' , lowerCamelCase_)
UpperCamelCase = kwargs.pop('''force_download''' , lowerCamelCase_)
UpperCamelCase = kwargs.pop('''resume_download''' , lowerCamelCase_)
UpperCamelCase = kwargs.pop('''proxies''' , lowerCamelCase_)
UpperCamelCase = kwargs.pop('''local_files_only''' , lowerCamelCase_)
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_)
elif os.path.isfile(lowerCamelCase_) or is_remote_url(lowerCamelCase_):
UpperCamelCase = pretrained_model_name_or_path
else:
UpperCamelCase = hf_bucket_url(lowerCamelCase_ , filename=lowerCamelCase_ , use_cdn=lowerCamelCase_)
try:
# Load from URL or cache if already cached
UpperCamelCase = cached_path(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCamelCase = Config.load_yaml(lowerCamelCase_)
except EnvironmentError:
UpperCamelCase = '''Can\'t load config for'''
raise EnvironmentError(lowerCamelCase_)
if resolved_config_file == config_file:
print('''loading configuration file from path''')
else:
print('''loading configuration file cache''')
return Config.load_yaml(lowerCamelCase_), kwargs
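# Usage sketch (keys are illustrative, not from this repo): Config({"model": {"device": "cpu"}})
# exposes nested dicts as attributes, e.g. cfg.model.device == "cpu"; from_pretrained /
# get_config_dict first resolve a local path or hub URL to the YAML file via cached_path.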
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load('''dump.pt''', map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f'{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %'
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''')
# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return f'{endpoint}/{model_id}-{filename}'
    else:
        return f'{endpoint}/{model_id}/{filename}'
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None,):
    """simple docstring"""
    ua = '''python/{}'''.format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join('''{}/{}'''.format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''')
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit='''B''', unit_scale=True, total=total, initial=resume_size, desc='''Downloading''',)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False,):
"""simple docstring"""
if cache_dir is None:
UpperCamelCase = TRANSFORMERS_CACHE
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = str(_lowercase )
os.makedirs(_lowercase ,exist_ok=_lowercase )
UpperCamelCase = None
if not local_files_only:
try:
UpperCamelCase = requests.head(_lowercase ,allow_redirects=_lowercase ,proxies=_lowercase ,timeout=_lowercase )
if response.status_code == 200:
UpperCamelCase = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCamelCase = url_to_filename(_lowercase ,_lowercase )
# get cache path to put the file
UpperCamelCase = os.path.join(_lowercase ,_lowercase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowercase ):
return cache_path
else:
UpperCamelCase = [
file
for file in fnmatch.filter(os.listdir(_lowercase ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(_lowercase ) > 0:
return os.path.join(_lowercase ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(_lowercase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCamelCase = cache_path + '''.lock'''
with FileLock(_lowercase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowercase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCamelCase = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(_lowercase ,'''a+b''' ) as f:
yield f
UpperCamelCase = _resumable_file_manager
if os.path.exists(_lowercase ):
UpperCamelCase = os.stat(_lowercase ).st_size
else:
UpperCamelCase = 0
else:
UpperCamelCase = partial(tempfile.NamedTemporaryFile ,dir=_lowercase ,delete=_lowercase )
UpperCamelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,_lowercase ,temp_file.name ,)
http_get(
_lowercase ,_lowercase ,proxies=_lowercase ,resume_size=_lowercase ,user_agent=_lowercase ,)
os.replace(temp_file.name ,_lowercase )
UpperCamelCase = {'''url''': url, '''etag''': etag}
UpperCamelCase = cache_path + '''.json'''
with open(_lowercase ,'''w''' ) as meta_file:
json.dump(_lowercase ,_lowercase )
return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode('''utf-8''')
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''')
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5'''):
        filename += ".h5"
    return filename
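# Cache filenames above are content-addressed: sha256(url).hexdigest(), plus
# "." + sha256(etag).hexdigest() when an ETag is available, so a changed remote
# file yields a new cache entry without clobbering the old one.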
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False,):
"""simple docstring"""
if cache_dir is None:
UpperCamelCase = TRANSFORMERS_CACHE
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = str(_lowercase )
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = str(_lowercase )
if is_remote_url(_lowercase ):
# URL, so get it from the cache (downloading if necessary)
UpperCamelCase = get_from_cache(
_lowercase ,cache_dir=_lowercase ,force_download=_lowercase ,proxies=_lowercase ,resume_download=_lowercase ,user_agent=_lowercase ,local_files_only=_lowercase ,)
elif os.path.exists(_lowercase ):
# File, and it exists.
UpperCamelCase = url_or_filename
elif urlparse(_lowercase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(_lowercase ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(_lowercase ) )
if extract_compressed_file:
if not is_zipfile(_lowercase ) and not tarfile.is_tarfile(_lowercase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCamelCase , UpperCamelCase = os.path.split(_lowercase )
UpperCamelCase = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
UpperCamelCase = os.path.join(_lowercase ,_lowercase )
if os.path.isdir(_lowercase ) and os.listdir(_lowercase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCamelCase = output_path + '''.lock'''
with FileLock(_lowercase ):
shutil.rmtree(_lowercase ,ignore_errors=_lowercase )
os.makedirs(_lowercase )
if is_zipfile(_lowercase ):
with ZipFile(_lowercase ,'''r''' ) as zip_file:
zip_file.extractall(_lowercase )
zip_file.close()
elif tarfile.is_tarfile(_lowercase ):
UpperCamelCase = tarfile.open(_lowercase )
tar_file.extractall(_lowercase )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(_lowercase ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    """simple docstring"""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split('''\n''')
        req.close()
    return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split('''/''')[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, '''rb''') as stream:
        weights = pkl.load(stream)
    model = weights.pop('''model''')
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace('''running_var''', '''num_batches_tracked''')
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(f'{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb')
def img_tensorize(im, input_format="RGB"):
    """simple docstring"""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
    assert img is not None, f'could not connect to: {im}'
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch)) | 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class ParameterProjection(nn.Module):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class LambdaLayer(nn.Module):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class DistributionOutput:
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)
    def distribution(self, distr_args, loc=None, scale=None,) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)
    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim(self) -> int:
        return len(self.event_shape)
    @property
    def value_in_support(self) -> float:
        return 0.0
    def get_parameter_projection(self, in_features) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),)
    def domain_map(self, *args):
        raise NotImplementedError()
    @staticmethod
    def squareplus(x) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
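    # Note: squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps the reals smoothly onto
    # the positive reals (similar in spirit to softplus); the domain_map methods
    # below use it to constrain scale / df / total_count parameters to be positive.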
class StudentTOutput(DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map(cls, df, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map(cls, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map(cls, total_count, logits):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)
    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # Absorb the scale into the logits: by the Gamma-Poisson mixture view of
            # the negative binomial, scaling the mean shifts the logits by log(scale).
            logits += scale.log()
        return self._base_distribution((total_count, logits)) | 34 | 1 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict | 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
UpperCamelCase = [[] for _ in range(lowerCamelCase_)]
UpperCamelCase = size
def __getitem__( self , lowerCamelCase_) -> Iterator[Edge]:
return iter(self._graph[vertex])
@property
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return self._size
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''')
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''')
self._graph[from_vertex].append(Edge(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> int | None:
UpperCamelCase = deque([start_vertex])
UpperCamelCase = [None] * self.size
UpperCamelCase = 0
while queue:
UpperCamelCase = queue.popleft()
UpperCamelCase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCamelCase = current_distance + edge.weight
UpperCamelCase = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase_ , lowerCamelCase_)
and new_distance >= dest_vertex_distance
):
continue
UpperCamelCase = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''')
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """simple docstring"""
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_labels=False, vocab_size=9_9, hidden_size=1_6, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=2_0, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=1_6, word_embed_proj_dim=1_6, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
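        # The check above verifies that incremental decoding with cached key/values
        # reproduces (within rtol=1e-3) a full forward pass over the concatenated tokens.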
@require_tf
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A_ = (TFOPTForCausalLM,) if is_tf_available() else ()
A_ = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
A_ = False
A_ = False
A_ = False
A_ = 10
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = TFOPTModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase_ , lowerCamelCase_):
if hasattr(lowerCamelCase_ , '''weight'''):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCamelCase_ , '''weight'''):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
UpperCamelCase = model_class(config=lowerCamelCase_)
UpperCamelCase = _get_word_embedding_weight(lowerCamelCase_ , model.get_input_embeddings())
UpperCamelCase = _get_word_embedding_weight(lowerCamelCase_ , model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(lowerCamelCase_)
UpperCamelCase = _get_word_embedding_weight(lowerCamelCase_ , model.get_input_embeddings())
UpperCamelCase = _get_word_embedding_weight(lowerCamelCase_ , model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
UpperCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCamelCase_)
# check that weights remain the same after resizing
UpperCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
UpperCamelCase = False
self.assertTrue(lowerCamelCase_)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCamelCase_)
UpperCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
UpperCamelCase = False
self.assertTrue(lowerCamelCase_)
def __snake_case ( _lowercase ):
"""simple docstring"""
return tf.constant(_lowercase ,dtype=tf.intaa )
@require_tf
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
A_ = 99
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = tf.ones((4, 1) , dtype=tf.intaa) * 2
UpperCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
UpperCamelCase = input_ids.shape[0]
UpperCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''')
UpperCamelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = tf.not_equal(lowerCamelCase_ , model.config.pad_token_id)
with tf.GradientTape():
UpperCamelCase = model(input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_).last_hidden_state
UpperCamelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=4e-3))
UpperCamelCase = tf.function(lowerCamelCase_ , jit_compile=lowerCamelCase_)
UpperCamelCase = xla_generate(lowerCamelCase_ , lowerCamelCase_)[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=4e-2))
@require_tf
@slow
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().setUp()
UpperCamelCase = '''facebook/opt-350m'''
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model)
UpperCamelCase = GPTaTokenizer.from_pretrained(self.path_model)
UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''tf''' , padding=lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
UpperCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
UpperCamelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
])
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-4))
UpperCamelCase = tf.function(lowerCamelCase_ , jit_compile=lowerCamelCase_)
UpperCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-4))
@require_tf
@slow
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self) -> Tuple:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''facebook/opt-125m'''
UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
UpperCamelCase = []
UpperCamelCase = GPTaTokenizer.from_pretrained(lowerCamelCase_)
UpperCamelCase = TFOPTForCausalLM.from_pretrained(lowerCamelCase_)
for prompt in self.prompts:
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''tf''').input_ids
UpperCamelCase = model.generate(lowerCamelCase_ , max_length=1_0)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = '''facebook/opt-350m'''
UpperCamelCase = GPTaTokenizer.from_pretrained(lowerCamelCase_)
UpperCamelCase = TFOPTForCausalLM.from_pretrained(lowerCamelCase_)
UpperCamelCase = '''left'''
# use different length sentences to test batching
UpperCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''tf''' , padding=lowerCamelCase_)
UpperCamelCase = inputs['''input_ids''']
UpperCamelCase = model.generate(input_ids=lowerCamelCase_ , attention_mask=inputs['''attention_mask'''])
UpperCamelCase = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
UpperCamelCase = model.generate(input_ids=lowerCamelCase_)
UpperCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa))
UpperCamelCase = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
UpperCamelCase = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_)
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_)
UpperCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''facebook/opt-350m'''
UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
UpperCamelCase = []
UpperCamelCase = GPTaTokenizer.from_pretrained(lowerCamelCase_)
UpperCamelCase = TFOPTForCausalLM.from_pretrained(lowerCamelCase_)
for prompt in self.prompts:
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''tf''').input_ids
UpperCamelCase = model.generate(lowerCamelCase_ , max_length=1_0)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_) | 34 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
        # if the new index's result extends the interval further to the right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
    # concatenate 'pattern', a separator character absent from both strings,
    # and 'input_str', then call z_function on the concatenation; without
    # the separator, matches straddling the pattern/input boundary are
    # overcounted (e.g. pattern "aa" in "aa" would report 2 instead of 1)
    UpperCamelCase = z_function(pattern + '''\0''' + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
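# --- illustrative sketch (added example, not part of the original file) ---
# A minimal, self-contained restatement of the Z-function under readable
# names, plus the array it yields for "abacaba"; the obfuscated definitions
# above compute the same values.
def _z_demo(s: str) -> list[int]:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:  # reuse the previously matched interval
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:  # the interval grew to the right
            left, right = i, i + z[i] - 1
    return z

assert _z_demo("abacaba") == [0, 0, 1, 0, 3, 0, 1]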
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''umt5'''
A_ = ['''past_key_values''']
def __init__( self , lowerCamelCase_=2_5_0_1_1_2 , lowerCamelCase_=5_1_2 , lowerCamelCase_=6_4 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=8 , lowerCamelCase_=None , lowerCamelCase_=6 , lowerCamelCase_=3_2 , lowerCamelCase_=1_2_8 , lowerCamelCase_=0.1 , lowerCamelCase_=1e-6 , lowerCamelCase_=1.0 , lowerCamelCase_="gated-gelu" , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="T5Tokenizer" , lowerCamelCase_=True , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=0 , **lowerCamelCase_ , ) -> int:
super().__init__(
is_encoder_decoder=lowerCamelCase_ , tokenizer_class=lowerCamelCase_ , tie_word_embeddings=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = d_kv
UpperCamelCase = d_ff
UpperCamelCase = num_layers
UpperCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase = num_heads
UpperCamelCase = relative_attention_num_buckets
UpperCamelCase = relative_attention_max_distance
UpperCamelCase = dropout_rate
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_factor
UpperCamelCase = feed_forward_proj
UpperCamelCase = use_cache
UpperCamelCase = self.feed_forward_proj.split('''-''')
UpperCamelCase = act_info[-1]
UpperCamelCase = act_info[0] == '''gated'''
if len(lowerCamelCase_) > 1 and act_info[0] != "gated" or len(lowerCamelCase_) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
if feed_forward_proj == "gated-gelu":
UpperCamelCase = '''gelu_new'''
@property
def UpperCAmelCase__ ( self) -> Tuple:
return self.d_model
@property
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return self.num_heads
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.num_layers
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
UpperCamelCase = '''past_encoder_sequence + sequence'''
UpperCamelCase = {0: '''batch'''}
UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase__ ( self) -> int:
return 1_3
@property
def UpperCAmelCase__ ( self) -> float:
return 5e-4 | 34 |
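# --- illustrative sketch (added example, not part of the original file) ---
# How the `feed_forward_proj` string above decomposes: the suffix names the
# activation, a "gated-" prefix selects a gated feed-forward, and
# "gated-gelu" is special-cased to the "gelu_new" activation. Standalone
# restatement of the __init__ logic under assumed readable names.
def _parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn, is_gated_act = act_info[-1], act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # mirrors the special case in __init__
    return dense_act_fn, is_gated_act

assert _parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert _parse_feed_forward_proj("relu") == ("relu", False)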
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
    UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 1 |
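# --- illustrative sketch (added example; comment-only, since it requires
# bitsandbytes and a CUDA device) ---
# Typical use of the replacement helper above, under the public names the
# deprecation warnings reference; keyword names are assumptions, and
# `BitsAndBytesConfig` is assumed to come from `transformers`.
#
#   import torch.nn as nn
#   from transformers import BitsAndBytesConfig
#
#   model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 4))
#   qconfig = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, quantization_config=qconfig)
#   # every nn.Linear except modules listed in `modules_to_not_convert`
#   # (default: ["lm_head"]) is now an 8-bit bnb linear layer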
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
def constraint_to_multiple_of(_lowercase ,_lowercase ,_lowercase=0 ,_lowercase=None ):
UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
UpperCamelCase = (output_size, output_size) if isinstance(_lowercase ,_lowercase ) else output_size
UpperCamelCase , UpperCamelCase = get_image_size(_lowercase )
UpperCamelCase , UpperCamelCase = output_size
# determine new height and width
UpperCamelCase = output_height / input_height
UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCamelCase = scale_width
else:
# fit height
UpperCamelCase = scale_height
UpperCamelCase = constraint_to_multiple_of(scale_height * input_height ,multiple=_lowercase )
UpperCamelCase = constraint_to_multiple_of(scale_width * input_width ,multiple=_lowercase )
return (new_height, new_width)
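# --- illustrative sketch (added example, not part of the original file) ---
# The resize rule above for a 480x640 image targeted at 384x384 with
# keep_aspect_ratio=True and a multiple of 32: the height scale (0.8)
# deviates less from 1 than the width scale (0.6), so it is applied to both
# sides before snapping to the multiple; min/max clamping is omitted here
# because neither bound is hit.
_scale_h, _scale_w = 384 / 480, 384 / 640
_scale = _scale_w if abs(1 - _scale_w) < abs(1 - _scale_h) else _scale_h
_new_h = int(round(_scale * 480 / 32) * 32)
_new_w = int(round(_scale * 640 / 32) * 32)
assert (_new_h, _new_w) == (384, 512)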
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''pixel_values''']
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = False , lowerCamelCase_ = 1 , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_5_5 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = keep_aspect_ratio
UpperCamelCase = ensure_multiple_of
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = 1 , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
UpperCamelCase = get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
UpperCamelCase = get_resize_output_image_size(
lowerCamelCase_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=lowerCamelCase_ , multiple=lowerCamelCase_ , )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Any:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = make_list_of_images(lowerCamelCase_)
if not valid_images(lowerCamelCase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowerCamelCase_) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_) for image in images]
UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Optional[int]:
UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase_) != len(lowerCamelCase_):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
if is_torch_tensor(lowerCamelCase_):
UpperCamelCase = target_sizes.numpy()
UpperCamelCase = []
for idx in range(len(lowerCamelCase_)):
UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCamelCase_)
UpperCamelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowerCamelCase_)
else:
UpperCamelCase = logits.argmax(dim=1)
UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation | 34 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
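# --- illustrative sketch (added example; comment-only, using the call-site
# name `_in_place_quick_sort` that the driver script below already uses) ---
#
#   demo = [3, 1, 2]
#   comparisons = _in_place_quick_sort(demo, 0, len(demo) - 1)
#   assert demo == [1, 2, 3]  # sorted in place; `comparisons` counts the
#   # a[index] < a[end] checks performed during partitioning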
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
    'Number of comparisons for 100 elements selected from a standard normal '
    'distribution is:'
)
print(z) | 34 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( *lowerCamelCase_ , **lowerCamelCase_) -> Optional[Any]:
pass
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = np.array(_lowercase )
UpperCamelCase = npimg.shape
return {"hash": hashimage(_lowercase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
A_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase = MaskGenerationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''')
def UpperCAmelCase__ ( self) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''')
UpperCamelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_5_6)
# Shortening by hashing
UpperCamelCase = []
for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(lowerCamelCase_), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''facebook/sam-vit-huge'''
UpperCamelCase = pipeline('''mask-generation''' , model=lowerCamelCase_)
UpperCamelCase = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_5_6)
# Shortening by hashing
UpperCamelCase = []
for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(lowerCamelCase_), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
] , ) | 34 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = num_of_nodes
UpperCamelCase = []
UpperCamelCase = {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def UpperCAmelCase__ ( self , lowerCamelCase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCamelCase = self.find_component(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None:
if component_size[u_node] <= component_size[v_node]:
UpperCamelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase_)
elif component_size[u_node] >= component_size[v_node]:
UpperCamelCase = self.find_component(lowerCamelCase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> None:
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
UpperCamelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCamelCase , UpperCamelCase , UpperCamelCase = edge
UpperCamelCase = self.m_component[u]
UpperCamelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCamelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase , UpperCamelCase , UpperCamelCase = edge
UpperCamelCase = self.m_component[u]
UpperCamelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
print(F'Added edge [{u} - {v}]\nAdded weight: {w}\n')
num_of_components -= 1
UpperCamelCase = [-1] * self.m_num_of_nodes
print(F'The total weight of the minimal spanning tree is: {mst_weight}')
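# --- illustrative sketch (added example; comment-only, class and method
# names assumed from the original file: Graph, add_edge, boruvka) ---
#
#   g = Graph(4)
#   for u, v, w in [(0, 1, 1), (0, 2, 2), (1, 2, 3), (2, 3, 4)]:
#       g.add_edge(u, v, w)
#   g.boruvka()
#   # prints each edge as it is added and a total MST weight of 1 + 2 + 4 = 7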
def __snake_case ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
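# --- illustrative sketch (added example, not part of the original script) ---
# The qkv split performed above: a fused (3*dim, dim) projection weight is
# cut into equal query/key/value blocks along dim 0 (shown here with dim=4).
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)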
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''mobilenet_v1'''
def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=2_2_4 , lowerCamelCase_=1.0 , lowerCamelCase_=8 , lowerCamelCase_="relu6" , lowerCamelCase_=True , lowerCamelCase_=0.999 , lowerCamelCase_=0.02 , lowerCamelCase_=0.001 , **lowerCamelCase_ , ) -> int:
super().__init__(**lowerCamelCase_)
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''')
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = depth_multiplier
UpperCamelCase = min_depth
UpperCamelCase = hidden_act
UpperCamelCase = tf_padding
UpperCamelCase = classifier_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})])
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})])
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
@property
def UpperCAmelCase__ ( self) -> float:
return 1e-4 | 34 |
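# --- illustrative sketch (added example; comment-only, class and attribute
# names assumed from the "mobilenet_v1" model type above) ---
#
#   cfg = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   assert cfg.model_type == "mobilenet_v1"
#   MobileNetV1Config(depth_multiplier=0)  # raises ValueError (see __init__)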
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
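# --- illustrative sketch (added example, not part of the original tests) ---
# How `find_executable_batch_size` behaves: it retries the wrapped function,
# halving the batch size after every out-of-memory error (detected from the
# exception message, so no GPU is needed here), giving the 3_2 -> 1_6 -> 8
# progression below.
@find_executable_batch_size(starting_batch_size=3_2)
def _demo_loop(batch_size):
    if batch_size > 8:
        raise RuntimeError('''CUDA out of memory.''')
    return batch_size

assert _demo_loop() == 8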
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = '''huggingface/label-files'''
UpperCamelCase = '''imagenet-1k-id2label.json'''
UpperCamelCase = json.load(open(hf_hub_download(_lowercase ,_lowercase ,repo_type='''dataset''' ) ,'''r''' ) )
UpperCamelCase = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCamelCase = BitConfig(
conv_layer=_lowercase ,num_labels=1000 ,idalabel=_lowercase ,labelaid=_lowercase ,)
return config
def rename_key ( name ):
"""simple docstring"""
if "stem.conv" in name:
UpperCamelCase = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
UpperCamelCase = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
UpperCamelCase = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
UpperCamelCase = '''bit.encoder.''' + name
return name
def prepare_img ( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint ( model_name ,pytorch_dump_folder_path ,push_to_hub=False ):
"""simple docstring"""
UpperCamelCase = get_config(_lowercase )
# load original model from timm
UpperCamelCase = create_model(_lowercase ,pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
UpperCamelCase = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val.squeeze() if '''head''' in key else val
# load HuggingFace model
UpperCamelCase = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
UpperCamelCase = create_transform(**resolve_data_config({} ,model=_lowercase ) )
UpperCamelCase = transform.transforms
UpperCamelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
UpperCamelCase = BitImageProcessor(
do_resize=_lowercase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=_lowercase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=_lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
UpperCamelCase = prepare_img()
UpperCamelCase = transform(_lowercase ).unsqueeze(0 )
UpperCamelCase = processor(_lowercase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase ,_lowercase )
# verify logits
with torch.no_grad():
UpperCamelCase = model(_lowercase )
UpperCamelCase = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCamelCase = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 34 |
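For intuition, the renaming above is a chain of plain string substitutions; a self-contained copy of the same rules applied to a few representative timm keys (the sample keys are illustrative):

def rename_key_sketch(name):
    # Same replace chain as the helper above: stem/blocks/head keys move to the HF names.
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name

print(rename_key_sketch("stem.conv.weight"))                # bit.embedder.convolution.weight
print(rename_key_sketch("head.fc.bias"))                    # classifier.1.bias
print(rename_key_sketch("stages.0.blocks.0.conv1.weight"))  # bit.encoder.stages.0.layers.0.conv1.weight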
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics ( p ):
            """simple docstring"""
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None | 34 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = field(
metadata={'''help''': '''The output directory where the model will be written.'''} , )
A_ = field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} , )
A_ = field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def main ( ):
"""simple docstring"""
UpperCamelCase = HfArgumentParser((ModelArguments,) )
((UpperCamelCase) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
UpperCamelCase = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
UpperCamelCase = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
UpperCamelCase = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
UpperCamelCase = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path ,decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path ,encoder_config=_lowercase ,decoder_config=_lowercase ,)
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
UpperCamelCase = decoder_config.decoder_start_token_id
UpperCamelCase = decoder_config.pad_token_id
if decoder_start_token_id is None:
UpperCamelCase = decoder_config.bos_token_id
if pad_token_id is None:
UpperCamelCase = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
UpperCamelCase = decoder_config.eos_token_id
UpperCamelCase = decoder_start_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
UpperCamelCase = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main() | 34 |
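HfArgumentParser turns the dataclass fields above into command-line flags, so a plausible invocation (assuming the script is saved as create_model.py; the checkpoint names are only examples) is:

python create_model.py \
    --output_dir ./vit-gpt2 \
    --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
    --decoder_model_name_or_path gpt2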
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key ( k ,patterns ):
    """simple docstring"""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
def convert_bigbird_pegasus ( tf_weights ,config_update ):
    """simple docstring"""
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy ( path ):
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars ,desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch ( ckpt_path ,save_dir ,config_update ):
    """simple docstring"""
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
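The whole conversion hinges on the ordered (tf_name, hf_name) pairs above; tracing one made-up TF variable name through a hand-picked subset of those pairs shows the mechanics (a sketch, not the full DECODER_PATTERNS list):

subset = [
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("pegasus", "model"),
    ("key", "k_proj"),
]

k = "pegasus/decoder/layer_0/self/key/kernel"
for tf_name, hf_name in subset:
    k = k.replace(tf_name, hf_name)  # each rule rewrites the key in place
print(k)  # model.decoder.layers.0.self.k_proj.weight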
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs ( location = "mumbai" ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location ).content ,'''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' ,attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('''a''' ,attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name = job.find('''span''' ,{'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}') | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob ( text ):
"""simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text ( text ):
"""simple docstring"""
    single_char_strings = Counter() # type: ignore
    two_char_strings = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
    for i in range(0 ,len(text ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
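As a sanity check on the loops above, first-order Shannon entropy is H = -sum(p(c) * log2 p(c)); computed by hand for a tiny string (standalone, independent of the functions above):

import math
from collections import Counter

text = "aab"
counts = Counter(text)        # Counter({'a': 2, 'b': 1})
total = sum(counts.values())  # 3
h = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(round(h, 3))            # 0.918 bits for the 2/3 vs 1/3 split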
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __snake_case ( _lowercase=None ,_lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=_lowercase )
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = field(
metadata={'''help''': '''The csv file to plot.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
A_ = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
A_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
A_ = list_field(
default=lowerCamelCase_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
int(_lowercase )
return True
except ValueError:
return False
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
float(_lowercase )
return True
except ValueError:
return False
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = args
UpperCamelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
with open(self.args.csv_file , newline='''''') as csv_file:
UpperCamelCase = csv.DictReader(lowerCamelCase_)
for row in reader:
UpperCamelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size''']))
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length''']))
if can_convert_to_int(row['''result''']):
# value is not None
UpperCamelCase = int(row['''result'''])
elif can_convert_to_float(row['''result''']):
# value is not None
UpperCamelCase = float(row['''result'''])
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase , UpperCamelCase = plt.subplots()
UpperCamelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCamelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''')
ax.set_yscale('''log''')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
UpperCamelCase = sorted(set(self.result_dict[model_name]['''bsz''']))
UpperCamelCase = sorted(set(self.result_dict[model_name]['''seq_len''']))
UpperCamelCase = self.result_dict[model_name]['''result''']
((UpperCamelCase) , (UpperCamelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCamelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCamelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase_ , )
else:
UpperCamelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((UpperCamelCase) , (UpperCamelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCamelCase = np.asarray(lowerCamelCase_ , lowerCamelCase_)[: len(lowerCamelCase_)]
plt.scatter(
lowerCamelCase_ , lowerCamelCase_ , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}')
plt.plot(lowerCamelCase_ , lowerCamelCase_ , '''--''')
title_str += F' {label_model_name} vs.'
UpperCamelCase = title_str[:-4]
UpperCamelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(lowerCamelCase_)
plt.xlabel(lowerCamelCase_)
plt.ylabel(lowerCamelCase_)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def main ( ):
"""simple docstring"""
UpperCamelCase = HfArgumentParser(_lowercase )
UpperCamelCase = parser.parse_args_into_dataclasses()[0]
UpperCamelCase = Plot(args=_lowercase )
plot.plot()
if __name__ == "__main__":
main() | 34 |
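The DictReader loop above keys each row on the model, batch_size, sequence_length and result columns, so a minimal input csv (the numbers are made up) looks like:

model,batch_size,sequence_length,result
bert-base-uncased,8,128,0.018
bert-base-uncased,8,512,0.056
gpt2,8,128,0.021
gpt2,8,512,0.067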
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial ( num ):
"""simple docstring"""
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
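Because of lru_cache, each value is computed once and reused by later calls; for example:

print(factorial(5))            # 120; fills the cache for factorial(1)..factorial(5)
print(factorial(6))            # 720; one new multiplication on top of the cached factorial(5)
print(factorial.cache_info())  # CacheInfo(hits=1, misses=6, ...) after the two calls above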
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0] , UserDict):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 1 |
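In practice this pipeline is reached through transformers' pipeline() factory; a typical call (the model name and labels are only examples) looks like:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)
print(result)  # list of {"score": ..., "label": ...} dicts, highest score first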
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''pegasus'''
A_ = ['''past_key_values''']
A_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase_=5_0_2_6_5 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=1_2 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_=1_6 , lowerCamelCase_=1_2 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_=1_6 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="gelu" , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=0 , lowerCamelCase_=False , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=1 , **lowerCamelCase_ , ) -> Tuple:
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = use_cache
UpperCamelCase = encoder_layers
UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
@property
def UpperCAmelCase__ ( self) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self) -> int:
return self.d_model | 34 |
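The attribute map above aliases the generic config names onto the Pegasus-specific ones, which a quick check against the released transformers class confirms (assuming transformers is installed):

from transformers import PegasusConfig

config = PegasusConfig()
# 'hidden_size' resolves to 'd_model', 'num_attention_heads' to 'encoder_attention_heads'
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16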
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((6_4, 6_4))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCamelCase_ , torch_dtype=torch.float16 , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 1 |
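Stripped of the test harness, the inference path these tests exercise is just the following (a sketch; it needs a CUDA GPU and downloads the checkpoint):

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
image.save("inpainted.png")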
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol = "AAPL" ):
    """simple docstring"""
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url ).text ,'''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' ,class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}') | 34 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key ,default=False ):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def pytest_xdist_worker_id ( ):
    """simple docstring"""
    worker = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
    worker = re.sub(r'''^gw''' ,'''''' ,worker ,0 ,re.M )
    return int(worker )
def __snake_case ( ):
"""simple docstring"""
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 1 |
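All the skip decorators above hang off the RUN_* environment flags parsed at the top of the module, so enabling, say, the slow suite is just an environment change:

import os
from distutils.util import strtobool

# Shell equivalent: RUN_SLOW=yes python -m pytest tests/
os.environ["RUN_SLOW"] = "yes"
# parse_flag_from_env-style conversion: "yes"/"true"/"1" -> 1, "no"/"false"/"0" -> 0
print(bool(strtobool(os.environ["RUN_SLOW"])))  # True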
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 9.80665
def __snake_case ( fluid_density ,volume ,gravity = g ):
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 34 |
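# A quick sanity check of the formula above (buoyant force = fluid density *
# gravity * displaced volume): fresh water at 1000 kg/m^3 displacing 0.5 m^3
# under standard gravity pushes up with about 4903.3 N.
rho, g_std, volume = 1000.0, 9.80665, 0.5
buoyant_force = rho * g_std * volume  # = 4903.325 N
assert abs(buoyant_force - 4903.325) < 1e-6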
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 1 |
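# A readable sketch of the same strand-sort idea: repeatedly peel an increasing
# "strand" off the input and merge it into the sorted result. `heapq.merge`
# stands in for the manual merge loop above; the function name is illustrative.
from heapq import merge

def strand_sort_sketch(items: list) -> list:
    result: list = []
    while items:
        strand = [items.pop(0)]          # start a new increasing strand
        i = 0
        while i < len(items):
            if items[i] >= strand[-1]:   # item extends the strand
                strand.append(items.pop(i))
            else:
                i += 1
        result = list(merge(result, strand))  # merge strand into the result
    return result

assert strand_sort_sketch([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]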
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , ) -> List[Any]:
UpperCamelCase = parent
UpperCamelCase = 1_3
UpperCamelCase = 7
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = 2
UpperCamelCase = 9_9
UpperCamelCase = 0
UpperCamelCase = 3_2
UpperCamelCase = 2
UpperCamelCase = 4
UpperCamelCase = 0.1
UpperCamelCase = 0.1
UpperCamelCase = 5_1_2
UpperCamelCase = 1_6
UpperCamelCase = 2
UpperCamelCase = 0.02
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = '''last'''
UpperCamelCase = True
UpperCamelCase = None
UpperCamelCase = 0
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa)
UpperCamelCase = None
if self.use_input_lengths:
UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa)
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
UpperCamelCase = TFFlaubertModel(config=lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCamelCase = model(lowerCamelCase_)
UpperCamelCase = [input_ids, input_mask]
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Tuple:
UpperCamelCase = TFFlaubertWithLMHeadModel(lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> str:
UpperCamelCase = TFFlaubertForSequenceClassification(lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
UpperCamelCase = self.num_labels
UpperCamelCase = TFFlaubertForTokenClassification(config=lowerCamelCase_)
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
UpperCamelCase = self.num_choices
UpperCamelCase = TFFlaubertForMultipleChoice(config=lowerCamelCase_)
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1) , (1, self.num_choices, 1))
UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ = False
A_ = False
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = TFFlaubertModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=3_7)
def UpperCAmelCase__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> List[Any]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFFlaubertModel.from_pretrained(lowerCamelCase_)
self.assertIsNotNone(lowerCamelCase_)
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
UpperCamelCase = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase = model(lowerCamelCase_)[0]
UpperCamelCase = tf.TensorShape((1, 8, 5_1_2))
self.assertEqual(output.shape , lowerCamelCase_)
# compare the actual values for a slice.
UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4)) | 34 |
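# Standalone version of the integration check above. This is a sketch that
# assumes network access to download the 'jplu/tf-flaubert-small-cased'
# checkpoint; the token ids mirror the test input ("J'aime flaubert !").
import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)
last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # (1, 8, 512)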
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = tempfile.mkdtemp()
# fmt: off
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
UpperCamelCase = os.path.join(self.tmpdirname , lowerCamelCase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
UpperCamelCase = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
processor.save_pretrained(self.tmpdirname)
UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCamelCase = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0)
UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(lowerCamelCase_ , return_tensors='''np''')
UpperCamelCase = processor(images=lowerCamelCase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
UpperCamelCase = '''lower newer'''
UpperCamelCase = processor(text=lowerCamelCase_)
UpperCamelCase = tokenizer(lowerCamelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
UpperCamelCase = '''lower newer'''
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with self.assertRaises(lowerCamelCase_):
processor()
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(lowerCamelCase_)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_)
UpperCamelCase = '''lower newer'''
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names) | 34 |
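# Minimal usage of the processor under test: it fans text out to the tokenizer
# and images out to the image processor, returning a single dict. A sketch
# assuming network access for the 'bert-base-uncased' vocab; the image
# processor runs from its default config, no download needed.
import numpy as np
from PIL import Image
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
image_processor = ViTImageProcessor()
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']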
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
        # `_convert_nargs_to_dict` should infer the type of each script argument (str/bool/int/float).
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 1 |
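# From the assertions above, `_convert_nargs_to_dict` turns a flat
# ["--key", "value", ...] list into a typed dict (str/bool/int/float). A rough,
# illustrative re-implementation (not accelerate's actual code) looks like
# this; note that the real helper additionally rejects the mixed bare-flag
# style exercised by `fail_training_script_args`, which this sketch does not.
def convert_nargs_to_dict_sketch(args: list) -> dict:
    def coerce(value: str):
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                pass
        if value in ("True", "False"):
            return value == "True"
        return value  # plain string

    result = {}
    i = 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            result[key] = coerce(args[i + 1])
            i += 2
        else:
            result[key] = True  # bare flag
            i += 1
    return result

assert convert_nargs_to_dict_sketch(["--epochs", "3", "--lr", "5e-5"]) == {"epochs": 3, "lr": 5e-5}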
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''detr'''
A_ = ['''past_key_values''']
A_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=3 , lowerCamelCase_=1_0_0 , lowerCamelCase_=6 , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=8 , lowerCamelCase_=6 , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=8 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=True , lowerCamelCase_="relu" , lowerCamelCase_=2_5_6 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_="sine" , lowerCamelCase_="resnet50" , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=1 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=1 , lowerCamelCase_=1 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , **lowerCamelCase_ , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = backbone_config.get('''model_type''')
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(lowerCamelCase_)
# set timm attributes to None
UpperCamelCase , UpperCamelCase , UpperCamelCase = None, None, None
UpperCamelCase = use_timm_backbone
UpperCamelCase = backbone_config
UpperCamelCase = num_channels
UpperCamelCase = num_queries
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = encoder_layers
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
UpperCamelCase = backbone
UpperCamelCase = use_pretrained_backbone
UpperCamelCase = dilation
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self) -> int:
return self.d_model
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
return cls(backbone_config=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict[str, any]:
UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase__ ( self) -> float:
return 1e-5
@property
def UpperCAmelCase__ ( self) -> int:
return 1_2 | 34 |
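# Instantiating the config above with its defaults involves no weights, so it
# runs offline; the attribute_map makes `num_attention_heads` an alias for
# `encoder_attention_heads`.
from transformers import DetrConfig

config = DetrConfig()
print(config.d_model)              # 256
print(config.num_attention_heads)  # 8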
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system, with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
        if concatenate_texts:
            return jiwer.compute_measures(
                lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]  # jiwer labels this "wer", but over characters it is the CER
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 1 |
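# The same computation done directly with jiwer (which the metric wraps).
# This assumes jiwer >= 2.3, where `jiwer.cer` is available; older versions
# need the character-level compute_measures branch shown above.
import jiwer

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
print(jiwer.cer(references, predictions))  # ~0.3415, matching the docstring example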
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = 11
UpperCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(_lowercase ,_lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowercase ,_lowercase ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
UpperCamelCase = 10
return solutions
def __snake_case ( _lowercase = 2 ):
"""simple docstring"""
UpperCamelCase = 1.0
for fraction in fraction_list(_lowercase ):
UpperCamelCase = Fraction(_lowercase )
result *= frac.denominator / frac.numerator
return int(_lowercase )
if __name__ == "__main__":
print(solution()) | 34 |
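# Worked check: the four non-trivial digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98 (e.g. 49/98 -> cancel the 9s -> 4/8 = 1/2). Their
# product is 1/100, so solution() returns the lowest-terms denominator, 100.
from fractions import Fraction

product = Fraction(16, 64) * Fraction(19, 95) * Fraction(26, 65) * Fraction(49, 98)
assert product == Fraction(1, 100)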
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 1 |
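# Typical round trip with the tokenizer above. A sketch assuming network
# access for the 'xlnet-base-cased' sentencepiece model; note that, per
# build_inputs_with_special_tokens above, <sep> and <cls> are appended at the
# END of the sequence (XLNet convention), and the padding side is "left".
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # [..., '<sep>', '<cls>'] at the end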
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 42
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=3 , lowerCamelCase_=("DownEncoderBlock2D",) , lowerCamelCase_=(6_4,) , lowerCamelCase_=2 , lowerCamelCase_=3_2 , lowerCamelCase_="silu" , lowerCamelCase_=True , ) -> Tuple:
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([])
# down
UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_):
UpperCamelCase = output_channel
UpperCamelCase = block_out_channels[i]
UpperCamelCase = i == len(lowerCamelCase_) - 1
UpperCamelCase = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_)
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6)
UpperCamelCase = nn.SiLU()
UpperCamelCase = 2 * out_channels if double_z else out_channels
UpperCamelCase = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1)
UpperCamelCase = False
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = x
UpperCamelCase = self.conv_in(lowerCamelCase_)
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_):
def custom_forward(*lowerCamelCase_):
return module(*lowerCamelCase_)
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0'''):
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_) , lowerCamelCase_ , use_reentrant=lowerCamelCase_)
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , lowerCamelCase_ , use_reentrant=lowerCamelCase_)
else:
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_) , lowerCamelCase_)
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , lowerCamelCase_)
else:
# down
for down_block in self.down_blocks:
UpperCamelCase = down_block(lowerCamelCase_)
# middle
UpperCamelCase = self.mid_block(lowerCamelCase_)
# post-process
UpperCamelCase = self.conv_norm_out(lowerCamelCase_)
UpperCamelCase = self.conv_act(lowerCamelCase_)
UpperCamelCase = self.conv_out(lowerCamelCase_)
return sample
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=3 , lowerCamelCase_=("UpDecoderBlock2D",) , lowerCamelCase_=(6_4,) , lowerCamelCase_=2 , lowerCamelCase_=3_2 , lowerCamelCase_="silu" , lowerCamelCase_="group" , ) -> Optional[int]:
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([])
UpperCamelCase = in_channels if norm_type == '''spatial''' else None
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
UpperCamelCase = list(reversed(lowerCamelCase_))
UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_):
UpperCamelCase = output_channel
UpperCamelCase = reversed_block_out_channels[i]
UpperCamelCase = i == len(lowerCamelCase_) - 1
UpperCamelCase = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_)
UpperCamelCase = output_channel
# out
if norm_type == "spatial":
UpperCamelCase = SpatialNorm(block_out_channels[0] , lowerCamelCase_)
else:
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6)
UpperCamelCase = nn.SiLU()
UpperCamelCase = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1)
UpperCamelCase = False
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> str:
UpperCamelCase = z
UpperCamelCase = self.conv_in(lowerCamelCase_)
UpperCamelCase = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_):
def custom_forward(*lowerCamelCase_):
return module(*lowerCamelCase_)
return custom_forward
if is_torch_version('''>=''' , '''1.11.0'''):
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_)
UpperCamelCase = sample.to(lowerCamelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_)
else:
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = sample.to(lowerCamelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_) , lowerCamelCase_ , lowerCamelCase_)
else:
# middle
UpperCamelCase = self.mid_block(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = sample.to(lowerCamelCase_)
# up
for up_block in self.up_blocks:
UpperCamelCase = up_block(lowerCamelCase_ , lowerCamelCase_)
# post-process
if latent_embeds is None:
UpperCamelCase = self.conv_norm_out(lowerCamelCase_)
else:
UpperCamelCase = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = self.conv_act(lowerCamelCase_)
UpperCamelCase = self.conv_out(lowerCamelCase_)
return sample
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="random" , lowerCamelCase_=False , lowerCamelCase_=True) -> Any:
super().__init__()
UpperCamelCase = n_e
UpperCamelCase = vq_embed_dim
UpperCamelCase = beta
UpperCamelCase = legacy
UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
UpperCamelCase = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
UpperCamelCase = self.used.shape[0]
UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase = self.re_embed
UpperCamelCase = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.')
else:
UpperCamelCase = n_e
UpperCamelCase = sane_index_shape
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = inds.shape
assert len(lowerCamelCase_) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1)
UpperCamelCase = self.used.to(lowerCamelCase_)
UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase = match.argmax(-1)
UpperCamelCase = match.sum(2) < 1
if self.unknown_index == "random":
UpperCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
UpperCamelCase = self.unknown_index
return new.reshape(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
UpperCamelCase = inds.shape
assert len(lowerCamelCase_) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1)
UpperCamelCase = self.used.to(lowerCamelCase_)
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase = 0 # simply set to zero
UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_)
return back.reshape(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase = z.permute(0 , 2 , 3 , 1).contiguous()
UpperCamelCase = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight) , dim=1)
UpperCamelCase = self.embedding(lowerCamelCase_).view(z.shape)
UpperCamelCase = None
UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
UpperCamelCase = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
UpperCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
UpperCamelCase = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
UpperCamelCase = self.remap_to_used(lowerCamelCase_)
UpperCamelCase = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase = indices.reshape(shape[0] , -1) # add batch axis
UpperCamelCase = self.unmap_to_all(lowerCamelCase_)
UpperCamelCase = indices.reshape(-1) # flatten again
# get quantized latent vectors
UpperCamelCase = self.embedding(lowerCamelCase_)
if shape is not None:
UpperCamelCase = z_q.view(lowerCamelCase_)
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False) -> str:
UpperCamelCase = parameters
UpperCamelCase , UpperCamelCase = torch.chunk(lowerCamelCase_ , 2 , dim=1)
UpperCamelCase = torch.clamp(self.logvar , -30.0 , 20.0)
UpperCamelCase = deterministic
UpperCamelCase = torch.exp(0.5 * self.logvar)
UpperCamelCase = torch.exp(self.logvar)
if self.deterministic:
UpperCamelCase = UpperCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def UpperCAmelCase__ ( self , lowerCamelCase_ = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
UpperCamelCase = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype)
UpperCamelCase = self.mean + self.std * sample
return x
def UpperCAmelCase__ ( self , lowerCamelCase_=None) -> int:
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=[1, 2, 3]) -> Optional[int]:
if self.deterministic:
return torch.Tensor([0.0])
UpperCamelCase = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
return self.mean | 34 |
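# DiagonalGaussianDistribution above is the standard VAE reparameterization:
# split channels into mean/logvar, draw mean + std * eps, and score a KL term
# against the unit Gaussian. A quick standalone check of those formulas:
import torch

params = torch.randn(1, 8, 4, 4)              # 2 * latent_channels on dim 1
mean, logvar = torch.chunk(params, 2, dim=1)
logvar = torch.clamp(logvar, -30.0, 20.0)
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)  # reparameterized draw
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(sample.shape, kl.shape)                 # torch.Size([1, 4, 4, 4]) torch.Size([1])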
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
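    # Drop negative, padding, EOS and BOS ids before delegating to the base class decoder.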
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
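    # CPM-Ant prepends BOS to each sequence when building model inputs with special tokens.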
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
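    # squareplus(x) = (x + sqrt(x^2 + 4)) / 2 maps the reals smoothly onto the positive half-line.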
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
UpperCamelCase = len(_lowercase ) if (len(_lowercase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) ,'''Stack'''.center(_lowercase ) ,'''Postfix'''.center(_lowercase ) ,sep=''' | ''' ,)
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowercase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowercase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowercase ) == 0:
stack.append(_lowercase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowercase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowercase ) # push x to stack
print(
x.center(8 ) ,(''''''.join(_lowercase )).ljust(_lowercase ) ,(''''''.join(_lowercase )).ljust(_lowercase ) ,sep=''' | ''' ,) # Output in tabular format
while len(_lowercase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) ,(''''''.join(_lowercase )).ljust(_lowercase ) ,(''''''.join(_lowercase )).ljust(_lowercase ) ,sep=''' | ''' ,) # Output in tabular format
return "".join(_lowercase ) # return Postfix as str
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowercase ) ):
if infix[i] == "(":
UpperCamelCase = ''')''' # change "(" to ")"
elif infix[i] == ")":
UpperCamelCase = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowercase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('\nEnter an Infix Equation = ') # Input an Infix equation
SCREAMING_SNAKE_CASE_ = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)') | 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> Union[str, Any]:
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ ( self) -> Any:
raise NotImplementedError() | 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase = kwargs.get('''name_or_path''')
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' if you are testing the model, this can safely be ignored''')
UpperCamelCase = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCamelCase = '''<|endoftext|>''' if eos_token is None else eos_token
UpperCamelCase = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCamelCase = unk_token if pad_token is None else pad_token
UpperCamelCase = eos_token if bos_token is None else bos_token
else:
UpperCamelCase = '''<pad>''' if pad_token is None else pad_token
UpperCamelCase = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
# Used for whitespace normalization in input texts
        # fmt: off
UpperCamelCase = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCamelCase = re.compile(
F'[{"".join(map(lowerCamelCase_ , list(range(0 , 9)) + list(range(1_1 , 3_2)) + list(range(1_2_7 , 1_6_0)) + [1_6_0, 1_7_3, 8_2_0_3]))}]')
def __getstate__( self) -> Union[str, Any]:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self) -> int:
return len(self.sp_model)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
UpperCamelCase = self.non_printing_characters_re.sub('''''' , lowerCamelCase_)
# Normalize whitespaces
UpperCamelCase = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text])
# NFC Unicode normalization
UpperCamelCase = unicodedata.normalize('''NFC''' , lowerCamelCase_)
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return self.sp_model.IdToPiece(lowerCamelCase_)
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> str:
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
UpperCamelCase = []
UpperCamelCase = ''''''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase_) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(lowerCamelCase_)
UpperCamelCase = False
out_string += self.sp_model.decode(lowerCamelCase_)
return out_string
def UpperCAmelCase__ ( self) -> Dict[str, int]:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,)
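    # Fast path: preprocess and encode text (or a batch of texts) straight to ids, optionally as a torch tensor.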
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_)
else:
UpperCamelCase = [self.preprocess_text(lowerCamelCase_) for t in text]
UpperCamelCase = self.sp_model.encode(lowerCamelCase_)
if return_tensors is True or return_tensors == "pt":
UpperCamelCase = torch.tensor(lowerCamelCase_)
return token_ids
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return self.sp_model.decode(lowerCamelCase_)
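    # Build a chat prompt of the form "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:" and encode it.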
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[int]:
UpperCamelCase = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
UpperCamelCase = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(lowerCamelCase_) + F'{self.bos_token}Bot:'
)
return self.encode(text=lowerCamelCase_) | 34 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 34 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
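# Set a (possibly quantized) tensor on a submodule addressed by its dotted name, converting to bitsandbytes params when needed.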
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
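# Recursively swap nn.Linear / Conv1D children for bitsandbytes 8-bit or 4-bit linear layers.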
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
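# Heuristically collect module names (output head, tied weights) that should be kept in full precision.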
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
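# Map every byte to a printable unicode character so BPE can operate reversibly on raw bytes.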
@lru_cache()
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = (
list(range(ord('''!''' ) ,ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) ,ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) ,ord('''ÿ''' ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase ,_lowercase ) )
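# Return the set of adjacent symbol pairs in a word; the lowest-ranked pair is merged next by BPE.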
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="replace" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Dict:
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''') as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_)
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''') as merges_handle:
UpperCamelCase = merges_handle.read().split('''\n''')[1:-1]
UpperCamelCase = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
@property
def UpperCAmelCase__ ( self) -> str:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = get_pairs(lowerCamelCase_)
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_: self.bpe_ranks.get(lowerCamelCase_ , float('''inf''')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = new_word
if len(lowerCamelCase_) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_)
UpperCamelCase = ''' '''.join(lowerCamelCase_)
UpperCamelCase = word
return word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_):
UpperCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_).split(''' '''))
return bpe_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
return self.decoder.get(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_) + '''\n''')
UpperCamelCase = 0
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
writer.write('''#version: 0.2\n''')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''')
UpperCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase_) + '''\n''')
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_)) + [1]
return [1] + ([0] * len(lowerCamelCase_)) + [1, 1] + ([0] * len(lowerCamelCase_)) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , **lowerCamelCase_) -> Dict:
UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_) > 0 and not text[0].isspace()):
UpperCamelCase = ''' ''' + text
return (text, kwargs) | 34 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
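# Randomized in-place quicksort that also counts the comparisons performed.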
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is:'
)
print(z) | 34 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
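# Sanity checks for the dummy-object generation utilities behind `make fix-copies`.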
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
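# Translate original MAE checkpoint keys to the Hugging Face ViTMAE naming scheme.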
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
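# Split fused qkv projection weights/biases into separate query/key/value tensors while renaming keys.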
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
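# The original MAE checkpoints store attention as one fused "qkv" matrix of shape
# (3 * dim, dim); the slicing above splits it into the separate query, key and
# value projections that the HF model expects.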
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 1 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
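# For example, a (hypothetical) TF variable name is rewritten by DECODER_PATTERNS as
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"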
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
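# `find_executable_batch_size` halves the batch size every time the wrapped
# function raises an out-of-memory error, so starting from 128 the tests below
# expect the attempted sizes 128 -> 64 -> 32 -> 16 -> 8.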
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
        UpperCamelCase = nn.BatchNorm1d(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['MaskFormerFeatureExtractor']
SCREAMING_SNAKE_CASE_ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
SCREAMING_SNAKE_CASE_ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 34 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
        UpperCamelCase = F'--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py'.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
        UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py'.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
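        # Lengths that are not multiples of the world size exercise the
        # uneven-shard handling of distributed evaluation: every sample must
        # still come back exactly once and in the original order.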
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''xmod'''
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=("en_XX",) , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Dict:
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
UpperCamelCase = pre_norm
UpperCamelCase = adapter_reduction_factor
UpperCamelCase = adapter_layer_norm
UpperCamelCase = adapter_reuse_layer_norm
UpperCamelCase = ln_before_adapter
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = default_language
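        # Hypothetical usage sketch: XmodConfig(languages=("en_XX", "de_DE"),
        # default_language="en_XX") builds a config with one adapter set per language.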
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
]) | 34 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 1 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature ,Image ):
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature ,Audio ):
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature ,Value ) and feature.dtype == "binary":
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(_lowercase ,set_batch_size )
return None if batch_size is np.inf else batch_size
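# E.g. a Features({"img": Image(), "label": Value("int64")}) schema returns the
# image row-group cap, while a schema with no image/audio/binary column returns None.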
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> List[str]:
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_) else {self.split: path_or_paths}
UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
UpperCamelCase = Parquet(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , hash=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
# Build iterable dataset
if self.streaming:
UpperCamelCase = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory)
return dataset
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Optional[int]:
UpperCamelCase = dataset
UpperCamelCase = path_or_buf
UpperCamelCase = batch_size or get_writer_batch_size(dataset.features)
UpperCamelCase = parquet_writer_kwargs
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
UpperCamelCase = self._write(file_obj=lowerCamelCase_ , batch_size=lowerCamelCase_ , **self.parquet_writer_kwargs)
else:
UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=lowerCamelCase_ , **self.parquet_writer_kwargs)
return written
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> int:
UpperCamelCase = 0
UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , lowerCamelCase_)
UpperCamelCase = self.dataset.features.arrow_schema
UpperCamelCase = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ , **lowerCamelCase_)
for offset in logging.tqdm(
range(0 , len(self.dataset) , lowerCamelCase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
UpperCamelCase = query_table(
table=self.dataset._data , key=slice(lowerCamelCase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowerCamelCase_)
written += batch.nbytes
writer.close()
return written | 34 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
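# This script prints the first- and second-order Shannon entropies of a text,
#   H1 = -sum_c p(c) * log2(p(c))      over single characters
#   H2 = -sum_cc p(cc) * log2(p(cc))   over adjacent character pairs
# followed by their difference H2 - H1.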
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            UpperCamelCase = cha + chb
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
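# e.g. analyze_text("ab") -> (Counter({"b": 1, "a": 1}), Counter({" a": 1, "ab": 1}))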
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE_ = ['text', 'image', 'audio']
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_lowercase ,_lowercase ):
inputs.append(create_inputs(_lowercase ) )
else:
raise ValueError(f'Invalid type requested: {input_type}' )
return inputs
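# e.g. create_inputs(["text", "image"]) -> ["Text input", <512x512 PIL.Image>]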
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
for output in outputs:
if isinstance(_lowercase ,(str, AgentText) ):
output_types.append('''text''' )
elif isinstance(_lowercase ,(Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(_lowercase ,(torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f'Invalid output: {output}' )
return output_types
@is_tool_test
class snake_case_ :
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
self.assertTrue(hasattr(self.tool , '''inputs'''))
self.assertTrue(hasattr(self.tool , '''outputs'''))
UpperCamelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCamelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
UpperCamelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = self.tool(*lowerCamelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
UpperCamelCase = [outputs]
self.assertListEqual(output_types(lowerCamelCase_) , self.tool.outputs)
def UpperCAmelCase__ ( self) -> str:
self.assertTrue(hasattr(self.tool , '''description'''))
self.assertTrue(hasattr(self.tool , '''default_checkpoint'''))
self.assertTrue(self.tool.description.startswith('''This is a tool that'''))
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = self.tool(*lowerCamelCase_)
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [outputs]
self.assertEqual(len(lowerCamelCase_) , len(self.tool.outputs))
for output, output_type in zip(lowerCamelCase_ , self.tool.outputs):
UpperCamelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = create_inputs(self.tool.inputs)
UpperCamelCase = []
for _input, input_type in zip(lowerCamelCase_ , self.tool.inputs):
if isinstance(lowerCamelCase_ , lowerCamelCase_):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
UpperCamelCase = self.tool(*lowerCamelCase_)
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [outputs]
self.assertEqual(len(lowerCamelCase_) , len(self.tool.outputs)) | 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ = parse(importlib.metadata.version('torch'))
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
UpperCamelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = parse(importlib.metadata.version(_lowercase ) )
return operation(_lowercase ,parse(_lowercase ) )
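# Example (assuming ">=" is one of the keys of STR_OPERATION_TO_FUNC):
#   compare_versions(parse("2.1.0"), ">=", "2.0")  # -> True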
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return compare_versions(_lowercase ,_lowercase ,_lowercase ) | 34 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
        UpperCamelCase = [hypothesis_template.format(x) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
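    # With candidate_labels=["cat", "dog"] and the default template, the tokenizer
    # receives ["This is a photo of cat.", "This is a photo of dog."].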
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda x: -x[0])
]
return result | 34 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''unispeech-sat'''
def __init__( self , __lowerCAmelCase=3_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="group" , __lowerCAmelCase="gelu" , __lowerCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=1_2_8 , __lowerCAmelCase=1_6 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.05 , __lowerCAmelCase=1_0 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0 , __lowerCAmelCase=3_2_0 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0_0 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=0.1 , __lowerCAmelCase="mean" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=5_0_4 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
__magic_name__ :Optional[int] = hidden_size
__magic_name__ :Any = feat_extract_norm
__magic_name__ :int = feat_extract_activation
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :Dict = list(__lowerCAmelCase )
__magic_name__ :Tuple = list(__lowerCAmelCase )
__magic_name__ :Dict = conv_bias
__magic_name__ :Dict = num_conv_pos_embeddings
__magic_name__ :int = num_conv_pos_embedding_groups
__magic_name__ :Any = len(self.conv_dim )
__magic_name__ :Optional[Any] = num_hidden_layers
__magic_name__ :List[Any] = intermediate_size
__magic_name__ :Union[str, Any] = hidden_act
__magic_name__ :List[str] = num_attention_heads
__magic_name__ :Tuple = hidden_dropout
__magic_name__ :Tuple = attention_dropout
__magic_name__ :List[str] = activation_dropout
__magic_name__ :Any = feat_proj_dropout
__magic_name__ :List[str] = final_dropout
__magic_name__ :Tuple = layerdrop
__magic_name__ :List[Any] = layer_norm_eps
__magic_name__ :List[Any] = initializer_range
__magic_name__ :Optional[Any] = vocab_size
__magic_name__ :Tuple = num_clusters
__magic_name__ :str = do_stable_layer_norm
__magic_name__ :Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ :Dict = apply_spec_augment
__magic_name__ :List[Any] = mask_time_prob
__magic_name__ :Tuple = mask_time_length
__magic_name__ :Any = mask_time_min_masks
__magic_name__ :Optional[Any] = mask_feature_prob
__magic_name__ :int = mask_feature_length
__magic_name__ :Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__magic_name__ :Any = num_codevectors_per_group
__magic_name__ :Dict = num_codevector_groups
__magic_name__ :List[str] = contrastive_logits_temperature
__magic_name__ :List[Any] = feat_quantizer_dropout
__magic_name__ :List[str] = num_negatives
__magic_name__ :Union[str, Any] = codevector_dim
__magic_name__ :Optional[int] = proj_codevector_dim
__magic_name__ :Optional[Any] = diversity_loss_weight
# ctc loss
__magic_name__ :Tuple = ctc_loss_reduction
__magic_name__ :Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__magic_name__ :Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__magic_name__ :Any = list(__lowerCAmelCase )
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = xvector_output_dim
@property
def A ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
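    # e.g. with the default conv_stride (5, 2, 2, 2, 2, 2, 2) this equals
    # 5 * 2**6 = 320, i.e. one feature frame per 320 raw audio samples.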
| 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
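        # Both images are 64x64 RGB renderings of the same random 32x32 tensor;
        # the "mask" is just a shifted copy, which is enough for a smoke test.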
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
| 34 | 0 |
from __future__ import annotations
import numpy as np
def relu ( _lowercase ) -> np.ndarray:
"""simple docstring"""
return np.maximum(0 , _lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 1 |
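A quick standalone check of the ReLU above together with its subgradient; relu_grad is an illustrative helper, not part of the sample:

import numpy as np

def relu_grad(x):
    # Subgradient of max(0, x): 1 where x > 0, else 0 (choosing 0 at x == 0).
    return (np.asarray(x) > 0).astype(float)

x = np.array([-1.0, 0.0, 5.0])
print(np.maximum(0, x))  # [0. 0. 5.]
print(relu_grad(x))      # [0. 0. 1.]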
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
    return port + uniq_delta
| 34 | 0 |
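A self-contained sketch of the skip-decorator pattern used throughout the module above; require_numpy is an illustrative stand-in for the renamed require_* helpers:

import unittest

def require_numpy(test_case):
    # Same shape as the helpers above: skip when the optional dependency
    # is missing, otherwise hand the test case back unchanged.
    try:
        import numpy  # noqa: F401
    except ImportError:
        return unittest.skip("test requires numpy")(test_case)
    return test_case

@require_numpy
class ExampleTest(unittest.TestCase):
    def test_sum(self):
        import numpy as np
        self.assertEqual(np.arange(3).sum(), 3)

if __name__ == "__main__":
    unittest.main()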
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_A)
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True})
a__ : ClassVar[Features] = Features({"audio": Audio()})
a__ : ClassVar[Features] = Features({"transcription": Value("string")})
a__ : str = "audio"
a__ : str = "transcription"
def snake_case_ ( self : List[str] , __lowerCAmelCase : int ) -> Optional[int]:
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __lowerCAmelCase ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
_A = copy.deepcopy(self )
_A = self.input_schema.copy()
_A = features[self.audio_column]
_A = input_schema
return task_template
@property
def snake_case_ ( self : str ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 |
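A minimal usage sketch for the task template above, assuming a datasets version that still ships task templates (they were removed in later releases); the column names here are illustrative:

from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"speech": Audio(sampling_rate=16_000), "text": Value("string")})
task = AutomaticSpeechRecognition(audio_column="speech", transcription_column="text")
aligned = task.align_with_features(features)  # copies the dataset's Audio feature into the input schema
print(aligned.column_mapping)  # {'speech': 'audio', 'text': 'transcription'}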
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 34 | 0 |
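A worked trace of the strand extraction above on the test input, assuming the strand_sort exercised by the asserts is in scope:

# Pass 1 over [4, 3, 5, 1, 2]: seed the strand with 4, keep any later item
# greater than the strand's tail -> strand [4, 5], remainder [3, 1, 2];
# merging into the empty solution gives [4, 5].
# Pass 2 over [3, 1, 2] -> strand [3]; merging gives [3, 4, 5].
# Pass 3 over [1, 2] -> strand [1, 2]; merging gives [1, 2, 3, 4, 5].
print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]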
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def A_( A : Optional[Any]=32 , A : List[Any]=10 , A : Union[str, Any]=100 , A : List[str]=1026 , A : List[Any]=True , A : Dict="data/tokenized_stories_train_wikitext103.jbl" , A : Dict="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase , UpperCamelCase = generate_datasets(
A , A , number=A , min_len=1026 , trim=A)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase = load_gpta('gpt2').to(A)
print('computing perplexity on objective set')
UpperCamelCase = compute_perplexity(A , A , A).item()
print('perplexity on objective set:' , A)
# collect igf pairs and save to file demo.jbl
collect_objective_set(A , A , A , A , A , A , A , A)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def A_( A : Union[str, Any] , A : Any=15 , A : List[str]=128 , A : List[str]=100 , A : Tuple="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase = SecondaryLearner(A)
# Train secondary learner
UpperCamelCase = train_secondary_learner(
A , A , max_epochs=A , batch_size=A , eval_freq=100 , igf_model_path=A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def A_( A : Tuple , A : List[Any] , A : int , A : Optional[int]=32 , A : Any=1000 , A : List[str]=16 , A : List[str]=1.0 , A : Union[str, Any]=recopy_gpta , A : Any=None , A : Dict=10 , A : Any="gpt2_finetuned.pt" , ):
UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase = RandomSampler(A)
UpperCamelCase = DataLoader(A , sampler=A)
UpperCamelCase = max_steps // (len(A)) + 1
UpperCamelCase = 0
UpperCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=A)
UpperCamelCase , UpperCamelCase , UpperCamelCase = recopy_model(A , A , A)
model.train()
if secondary_learner is not None:
secondary_learner.to(A)
secondary_learner.eval()
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = []
UpperCamelCase = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase = compute_perplexity(A , A , A)
test_perps.append(A)
print('Test perplexity, step' , A , ':' , A)
for epoch in range(int(A)):
for step, example in enumerate(A):
torch.cuda.empty_cache()
UpperCamelCase = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase = model(A , labels=A)
UpperCamelCase = True
if secondary_learner is not None:
UpperCamelCase = secondary_learner.forward(
torch.tensor(A , dtype=torch.long , device=A).unsqueeze(0))[0].item()
observed_qs.append(float(A))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase = -1
if predicted_q < threshold:
UpperCamelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase = compute_perplexity(A , A , A)
test_perps.append(A)
print('Test perplexity, step' , A , ':' , A)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , A)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def A_( ):
UpperCamelCase = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=A , type=A , required=A , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=A , type=A , required=A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=A , default=A , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=A , default=A , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=A , type=A , required=A , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=A , type=A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=A , default=A , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=A , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=A , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=A , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=A , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=A , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=A , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=A , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=A , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=A , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=A , type=A , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=A , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=A , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=A , type=A , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=A , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase = training_secondary_learner(
A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase , UpperCamelCase = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=A)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
A , A , A , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=A , secondary_learner=A , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 3 |
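The training loop above realizes the "decaying selectivity" described in its comment as a single hard drop of the threshold at batch 10; a linear schedule, shown here purely as an illustrative alternative and not part of the script, would decay it gradually:

def decayed_threshold(step, start=1.0, end=-1.0, num_steps=10):
    # Linear decay from `start` to `end` over `num_steps` batches, then hold.
    frac = min(step / num_steps, 1.0)
    return start + frac * (end - start)

print([round(decayed_threshold(s), 1) for s in range(12)])
# [1.0, 0.8, 0.6, 0.4, 0.2, 0.0, -0.2, -0.4, -0.6, -0.8, -1.0, -1.0]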
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
            return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])}
| 34 | 0 |
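The metric above is a thin wrapper around scipy; calling scipy directly reproduces the docstring example:

from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p, 2))  # -0.74 0.15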
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( a__ , unittest.TestCase ):
snake_case__ = DanceDiffusionPipeline
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase = IPNDMScheduler()
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = DanceDiffusionPipeline(**_snake_case )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe(**_snake_case )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 4 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 34 | 0 |
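A minimal reimplementation sketch of the argv-to-dict conversion the test above exercises; it is illustrative, not accelerate's actual _convert_nargs_to_dict:

def nargs_to_dict(args):
    # "--flag value" pairs become {"flag": value}; a bare "--flag" becomes True.
    out, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            raw, i = args[i + 1], i + 2
        else:
            raw, i = True, i + 1
        if isinstance(raw, str):  # best-effort coercion to bool/int/float
            if raw in ("True", "False"):
                raw = raw == "True"
            else:
                for cast in (int, float):
                    try:
                        raw = cast(raw)
                        break
                    except ValueError:
                        pass
        out[key] = raw
    return out

print(nargs_to_dict(["--do_train", "False", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5"]))
# {'do_train': False, 'epochs': 3, 'learning_rate': 5e-05, 'max_steps': 50.5}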
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
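A minimal sketch of the lazy-import idea behind _LazyModule, using PEP 562's module-level __getattr__; the mechanism shown is illustrative, not transformers' actual implementation, and it would live in a package's __init__.py:

import importlib

_IMPORT_STRUCTURE = {
    "configuration_wav2vec2": ["Wav2Vec2Config"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
}
_SYMBOL_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name):  # PEP 562: invoked for attributes not found on this module
    mod = _SYMBOL_TO_MODULE.get(name)
    if mod is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # The heavy submodule is imported only on first attribute access.
    return getattr(importlib.import_module(f".{mod}", __name__), name)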
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 34 | 0 |
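A by-hand check of the docstring example above using a plain character-level Levenshtein distance; this is an illustrative computation, not jiwer's implementation:

def edit_distance(ref, hyp):
    # Classic one-row dynamic-programming Levenshtein distance
    # (substitutions, deletions and insertions all cost 1).
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]

references = ["this is the reference", "there is another one"]
predictions = ["this is the prediction", "there is an other sample"]
errors = sum(edit_distance(r, p) for r, p in zip(references, predictions))
total = sum(len(r) for r in references)
print(errors / total)  # 0.34146341463414637, matching the docstring example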
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = 42
lowerCamelCase_ = None
lowerCamelCase_ = None
_lowerCamelCase = namedtuple('CoinsDistribResult', 'moves excess')
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: TreeNode | None ):
if root is None:
return 0
# Validation
def count_nodes(UpperCamelCase__: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(UpperCamelCase__: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(UpperCamelCase__ ) != count_coins(UpperCamelCase__ ):
        raise ValueError("""The number of nodes should be the same as the number of coins""" )
# Main calculation
def get_distrib(UpperCamelCase__: TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_distrib(node.left )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_distrib(node.right )
SCREAMING_SNAKE_CASE__ = 1 - left_distrib_excess
SCREAMING_SNAKE_CASE__ = 1 - right_distrib_excess
SCREAMING_SNAKE_CASE__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(UpperCamelCase__ )
+ abs(UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(UpperCamelCase__ , UpperCamelCase__ )
return get_distrib(UpperCamelCase__ )[0]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 |
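A self-contained usage sketch of the coin-distribution recursion above (the "distribute coins in a binary tree" problem); Node and min_moves are compact stand-ins for the renamed class and function, not the sample's exact formulation:

class Node:
    # Stand-in for the (renamed) tree-node dataclass above: a coin count
    # plus left/right children.
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def min_moves(node):
    # Returns (moves, excess), where excess = coins in the subtree minus
    # nodes in the subtree; each unit of excess crossing an edge costs one move.
    if node is None:
        return 0, 0
    left_moves, left_excess = min_moves(node.left)
    right_moves, right_excess = min_moves(node.right)
    moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
    return moves, node.data - 1 + left_excess + right_excess

# Root holds all 3 coins, both leaves hold none: one coin moves down each edge.
print(min_moves(Node(3, Node(0), Node(0)))[0])  # 2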
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
        return (out_vocab_file,)
| 34 | 0 |
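The accent-stripping branch in the preprocessing method above relies on NFKD normalization; a quick standalone check of those two steps, illustrative only:

import unicodedata

text = "Héllo ``wörld''"
# Same two steps as above: normalize quote styles, then drop combining marks.
text = text.replace("``", '"').replace("''", '"')
stripped = "".join(c for c in unicodedata.normalize("NFKD", text) if not unicodedata.combining(c))
print(stripped)  # Hello "world"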
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def _snake_case ( _snake_case : jnp.ndarray , _snake_case : int , _snake_case : float = 1 , _snake_case : float = 1 , _snake_case : float = 1.0E4 , _snake_case : bool = False , _snake_case : float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
_A = float(embedding_dim // 2 )
_A = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_A = min_timescale * jnp.exp(jnp.arange(_snake_case , dtype=jnp.floataa ) * -log_timescale_increment )
_A = jnp.expand_dims(_snake_case , 1 ) * jnp.expand_dims(_snake_case , 0 )
# scale embeddings
_A = scale * emb
if flip_sin_to_cos:
_A = jnp.concatenate([jnp.cos(_snake_case ), jnp.sin(_snake_case )] , axis=1 )
else:
_A = jnp.concatenate([jnp.sin(_snake_case ), jnp.cos(_snake_case )] , axis=1 )
_A = jnp.reshape(_snake_case , [jnp.shape(_snake_case )[0], embedding_dim] )
return signal
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int = 32
UpperCAmelCase : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Union[str, Any] , _UpperCAmelCase : str ):
_A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(_UpperCAmelCase )
_A = nn.silu(_UpperCAmelCase )
_A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(_UpperCAmelCase )
return temb
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int = 32
UpperCAmelCase : bool = False
UpperCAmelCase : float = 1
@nn.compact
def __call__( self : Optional[Any] , _UpperCAmelCase : str ):
return get_sinusoidal_embeddings(
_UpperCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 7 |
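A numpy transcription of the sinusoidal embedding above under its defaults (freq_shift=1, timescales from 1 to 1e4, sin before cos); this is an illustrative sketch, not the Flax module itself:

import math
import numpy as np

def sinusoidal_embeddings(timesteps, dim, min_ts=1.0, max_ts=1.0e4, freq_shift=1.0):
    half = dim // 2
    # One geometric frequency ladder per half of the embedding.
    log_increment = math.log(max_ts / min_ts) / (half - freq_shift)
    inv_timescales = min_ts * np.exp(np.arange(half) * -log_increment)
    args = timesteps[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)

emb = sinusoidal_embeddings(np.array([0.0, 1.0, 10.0]), dim=32)
print(emb.shape)  # (3, 32)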
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
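        # greedy longest-match-first scan: take the longest vocab substring at
        # each position, emitting unk_token and advancing one char on a miss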
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
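        # expose the space/newline placeholder tokens under literal " " and "\n" keys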
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
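        # strip padded (negative) positions and pad/eos/bos ids before the base class decodes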
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
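        # put the literal " " and "\n" entries back under their placeholder tokens before writing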
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
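        # mask convention: 1 marks a special token (the prepended BOS ids), 0 a sequence token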
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : Any ) -> Any:
__A : Optional[int] = _distribute_shards(**__snake_case )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> List[str]:
__A : List[str] = _split_gen_kwargs(__snake_case , __snake_case )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : List[Any] ) -> Optional[Any]:
if expected is RuntimeError:
with pytest.raises(__snake_case ):
_number_of_shards_in_gen_kwargs(__snake_case )
else:
__A : Optional[Any] = _number_of_shards_in_gen_kwargs(__snake_case )
assert out == expected | 8 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int]=3 , _snake_case : Union[str, Any]=32 , _snake_case : Optional[int]=3 , _snake_case : List[Any]=10 , _snake_case : Union[str, Any]=[10, 20, 30, 40] , _snake_case : int=[1, 1, 2, 1] , _snake_case : Union[str, Any]=True , _snake_case : Union[str, Any]=True , _snake_case : Optional[Any]="relu" , _snake_case : Dict=3 , _snake_case : Optional[int]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = embeddings_size
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = num_labels
A__ = scope
A__ = len(_snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : str ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self : List[Any] , _snake_case : Tuple , _snake_case : str , _snake_case : str ):
"""simple docstring"""
A__ = TFResNetModel(config=_snake_case )
A__ = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFResNetForImageClassification(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A__ : List[str] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
A__ : List[str] = False
A__ : str = False
A__ : List[Any] = False
A__ : Tuple = False
A__ : str = False
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = TFResNetModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def _a ( self : Any ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Dict ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _a ( self : str ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Dict , _snake_case : List[Any] , _snake_case : str ):
A__ = model_class(_snake_case )
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ = layer_type
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> str:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[str] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='tf' )
# forward pass
A__ = model(**_snake_case )
# verify the logits
A__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
| 9 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowerCAmelCase = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=None ):
# Initialise PyTorch model
_UpperCamelCase = XLNetConfig.from_json_file(__snake_case )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(__snake_case )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(__snake_case )
else:
_UpperCamelCase = XLNetLMHeadModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
_UpperCamelCase = os.path.join(__snake_case , __snake_case )
_UpperCamelCase = os.path.join(__snake_case , __snake_case )
print(f"""Save PyTorch model to {os.path.abspath(__snake_case )}""" )
torch.save(model.state_dict() , __snake_case )
print(f"""Save configuration file to {os.path.abspath(__snake_case )}""" )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_lowerCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 10 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_a , _a = array[indexa], array[indexa]
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
if length > 1:
_a = int(length / 2)
for i in range(__A , low + middle):
comp_and_swap(__A , __A , i + middle , __A)
bitonic_merge(__A , __A , __A , __A)
bitonic_merge(__A , low + middle , __A , __A)
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
if length > 1:
_a = int(length / 2)
bitonic_sort(__A , __A , __A , 1)
bitonic_sort(__A , low + middle , __A , 0)
bitonic_merge(__A , __A , __A , __A)
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 11 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : Dict = FileLock(str(tmpdir / """foo.lock""" ) )
lowercase__ : Tuple = FileLock(str(tmpdir / """foo.lock""" ) )
lowercase__ : Optional[int] = 0.01
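    # while the first lock is held, the second FileLock on the same path must
    # fail to acquire, raising Timeout after roughly `timeout` seconds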
with locka.acquire():
with pytest.raises(lowercase_ ):
lowercase__ : str = time.time()
locka.acquire(lowercase_ )
assert time.time() - _start > timeout
def UpperCamelCase ( lowercase_ ) -> List[Any]:
'''simple docstring'''
lowercase__ : Optional[int] = """a""" * 10_00 + """.lock"""
lowercase__ : Optional[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowercase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
lowercase__ : str = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase_ ):
locka.acquire(0 )
| 12 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase__ ( UpperCAmelCase_ : Sequence[float] , UpperCAmelCase_ : bool = False ) -> float:
if not arr:
return 0
__lowerCamelCase : str = 0 if allow_empty_subarrays else float('-inf' )
__lowerCamelCase : Optional[Any] = 0.0
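    # Kadane's algorithm: either extend the running sum or restart at the current
    # element; e.g. max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6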
for num in arr:
__lowerCamelCase : Optional[Any] = max(0 if allow_empty_subarrays else num , curr_sum + num )
__lowerCamelCase : str = max(UpperCAmelCase_ , UpperCAmelCase_ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A__ : Tuple = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 13 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z) | 34 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __UpperCAmelCase ( __a : Any ,__a : str ,__a : List[Any]=1_024 ,__a : Optional[int]=1_024 ,__a : int=False ,**__a : str ) -> Optional[int]:
"""simple docstring"""
_a : Tuple = AutoTokenizer.from_pretrained(__a )
_a : int = SeqaSeqDataset(__a ,__a ,__a ,__a ,type_path='''train''' ,**__a )
_a : List[str] = tok.pad_token_id
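    # helper: count non-pad tokens per example (max of source/target when
    # consider_target is set); the lengths are pickled to each dataset's len_file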
def get_lens(__a : Any ):
_a : List[Any] = tqdm(
DataLoader(__a ,batch_size=512 ,num_workers=8 ,shuffle=__a ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,)
_a : Optional[Any] = []
for batch in dl:
_a : List[str] = batch['''input_ids'''].ne(__a ).sum(1 ).tolist()
_a : Optional[Any] = batch['''labels'''].ne(__a ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__a ,__a ):
max_lens.append(max(__a ,__a ) )
else:
max_lens.extend(__a )
return max_lens
_a : List[str] = get_lens(__a )
_a : Tuple = SeqaSeqDataset(__a ,__a ,__a ,__a ,type_path='''val''' ,**__a )
_a : Union[str, Any] = get_lens(__a )
pickle_save(__a ,train_ds.len_file )
pickle_save(__a ,val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 14 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
A : List[str] = '▁'
A : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
A : str = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
A : Dict = {'vinai/bartpho-syllable': 1_0_2_4}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : List[Any]="<unk>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Any , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Optional[int] , _UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowerCamelCase__ (self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ (self : Dict ) -> str:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase__ (self : Dict ) -> Any:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Tuple ) -> str:
"""simple docstring"""
lowercase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 15 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
SCREAMING_SNAKE_CASE = DetaConfig(
backbone_config=A__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=A__ , with_box_refine=A__ , two_stage=A__ , )
# set labels
SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "o365" in model_name:
SCREAMING_SNAKE_CASE = 366
SCREAMING_SNAKE_CASE = "object365-id2label.json"
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def __a ( A__ : Any ):
SCREAMING_SNAKE_CASE = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def __a ( A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Any ):
SCREAMING_SNAKE_CASE = dct.pop(A__ )
SCREAMING_SNAKE_CASE = val
def __a ( A__ : str , A__ : int ):
SCREAMING_SNAKE_CASE = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-dim :]
# fmt: on
def __a ( A__ : Any , A__ : Tuple ):
# transformer decoder self-attention layers
SCREAMING_SNAKE_CASE = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:hidden_size, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
hidden_size : hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE = in_proj_weight[-hidden_size:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-hidden_size:]
def __a ( ):
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def __a ( A__ : List[str] , A__ : Union[str, Any] , A__ : str ):
SCREAMING_SNAKE_CASE = get_deta_config(A__ )
# load original state dict
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F"Model name {model_name} not supported" )
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(A__ , param.shape )
# rename keys
SCREAMING_SNAKE_CASE = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_swin_q_k_v(A__ , config.backbone_config )
read_in_decoder_q_k_v(A__ , A__ )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            SCREAMING_SNAKE_CASE = state_dict.pop(key )
            SCREAMING_SNAKE_CASE = val
        if "input_proj" in key:
            SCREAMING_SNAKE_CASE = state_dict.pop(key )
            SCREAMING_SNAKE_CASE = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            SCREAMING_SNAKE_CASE = state_dict.pop(key )
            SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = DetaForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
SCREAMING_SNAKE_CASE = "cuda" if torch.cuda.is_available() else "cpu"
model.to(A__ )
# load image processor
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
SCREAMING_SNAKE_CASE = model(pixel_values.to(A__ ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(A__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(A__ ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
processor.save_pretrained(A__ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F"jozhang97/{model_name}" )
processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A : str = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 16 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 0 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : list ,a__ : int | None = None ,a__ : int | None = None ) -> None:
if start is None:
__A : int = 0
if end is None:
__A : Union[str, Any] = len(a__ ) - 1
if start >= end:
return
__A : Optional[int] = (start + end) // 2
slowsort(a__ ,a__ ,a__ )
slowsort(a__ ,mid + 1 ,a__ )
if sequence[end] < sequence[mid]:
__A , __A : Dict = sequence[mid], sequence[end]
slowsort(a__ ,a__ ,end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 17 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , **_lowerCAmelCase ) -> List[Any]:
super().__init__(**_lowerCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , **_lowerCAmelCase ) -> Any:
_lowerCAmelCase = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_lowerCAmelCase = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="This is a sound of {}." ) -> List[str]:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowerCAmelCase = requests.get(_lowerCAmelCase ).content
else:
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCAmelCase = f.read()
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = ffmpeg_read(_lowerCAmelCase , self.feature_extractor.sampling_rate )
if not isinstance(_lowerCAmelCase , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
_lowerCAmelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
_lowerCAmelCase = candidate_labels
        _lowerCAmelCase = [hypothesis_template.format(x ) for x in candidate_labels]
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework , padding=_lowerCAmelCase )
_lowerCAmelCase = [text_inputs]
return inputs
def _snake_case ( self , _lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = model_inputs.pop("candidate_labels" )
_lowerCAmelCase = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , _lowerCAmelCase ):
_lowerCAmelCase = text_inputs[0]
else:
# Batching case.
_lowerCAmelCase = text_inputs[0][0]
_lowerCAmelCase = self.model(**_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def _snake_case ( self , _lowerCAmelCase ) -> str:
_lowerCAmelCase = model_outputs.pop("candidate_labels" )
_lowerCAmelCase = model_outputs["logits"][0]
if self.framework == "pt":
_lowerCAmelCase = logits.softmax(dim=0 )
_lowerCAmelCase = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
_lowerCAmelCase = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(_lowerCAmelCase , _lowerCAmelCase ) , key=lambda _lowerCAmelCase : -_lowerCAmelCase[0] )
]
return result
| 18 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_a = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None ) -> List[str]:
"""simple docstring"""
if rng is None:
_UpperCamelCase = random.Random()
_UpperCamelCase = 1
for dim in shape:
total_dims *= dim
_UpperCamelCase = []
for _ in range(__snake_case ):
values.append(rng.randint(0, vocab_size - 1 ) )
_UpperCamelCase = np.array(__snake_case, dtype=jnp.intaa ).reshape(__snake_case )
return output
def lowerCamelCase__ ( __snake_case, __snake_case=None ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = ids_tensor(__snake_case, vocab_size=2, rng=__snake_case )
# make sure that at least one token is attended to for each batch
_UpperCamelCase = 1
return attn_mask
@require_flax
class _UpperCAmelCase:
lowercase__ = None
lowercase__ = ()
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_UpperCamelCase = 2
_UpperCamelCase = inputs['''input_ids'''].shape[-1] // 2
_UpperCamelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
_UpperCamelCase = jnp.ones_like(__a)
_UpperCamelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_UpperCamelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_UpperCamelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 0
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_UpperCamelCase = getattr(__a , __a)
_UpperCamelCase = pt_model_class(__a).eval()
_UpperCamelCase = load_flax_weights_in_pytorch_model(__a , flax_model.params)
_UpperCamelCase = flax_model.generate(__a).sequences
_UpperCamelCase = pt_model.generate(torch.tensor(__a , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_UpperCamelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = True
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 2
_UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = True
_UpperCamelCase = max_length
_UpperCamelCase = 0.8
_UpperCamelCase = 10
_UpperCamelCase = 0.3
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = max_length
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = max_length
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = False
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = True
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = 2
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
_UpperCamelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = tokenizer(__a , return_tensors='''np''').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__a , '''do_samples'''):
model.generate(__a , do_samples=__a)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__a , '''foo'''):
_UpperCamelCase = {'''foo''': '''bar'''}
model.generate(__a , **__a)
| 19 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |