"""Integration tests for the `datasets` inspection utilities."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    # inspecting a dataset copies its loading script into tmp_path
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # "paws" has several configs, so omitting config_name must raise
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
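

# Illustrative addition, not part of the original test module: the success path of
# get_dataset_split_names can be checked the same way. The parametrization below
# reuses a case from test_get_dataset_config_info and is an assumption.
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
    ],
)
def test_get_dataset_split_names(path, config_name, expected_splits):
    assert get_dataset_split_names(path, config_name=config_name) == expected_splits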
"""Project Euler problem 234: sum of all "semidivisible" numbers below the limit.

A number n is semidivisible if exactly one of lps(n) (largest prime <= sqrt(n))
and ups(n) (smallest prime >= sqrt(n)) divides it.
"""
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n via the sieve of Eratosthenes (odd-only pass)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers that do not exceed ``limit``."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
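    # Illustrative sanity check for the sieve (added; not part of the original file):
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]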
"""Compute the missing one of voltage, current, or power (P = V * I)."""
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (pass the unknown as 0),
    return the name and value of the missing quantity."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
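    # Illustrative usage (added; not part of the original module):
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)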
"""Calculate a price with sales tax added."""


def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
"""Lazy import structure for the NLLB-MoE model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
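
# Illustrative note (added; not part of the original file): with the lazy structure
# above, `from transformers.models.nllb_moe import NllbMoeConfig` resolves without
# importing torch, while the model classes are only materialized on first access.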
"""Tests for FeaturesManager.determine_framework in the ONNX export utilities."""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint (the exception type is a best-effort reconstruction)
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment, TensorFlow mocked away -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error (exception type reconstructed)
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
"""Deterministic Miller-Rabin primality test (probabilistic above its bound)."""


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Return True if n is prime; deterministic for n < 3,317,044,064,679,887,385,961,981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Check a composite and a prime against each deterministic range bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
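    # Illustrative spot checks (added; not part of the original module):
    print(miller_rabin(97))   # True
    print(miller_rabin(561))  # False (a Carmichael number, still caught by Miller-Rabin)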
"""Tests for the ProphetNet tokenizer and the underlying BERT tokenization helpers."""
import os
import unittest

from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
"""Distribute coins in a binary tree so that every node ends up with exactly one
coin, counting the minimum number of moves (LeetCode 979)."""
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of single-coin moves needed to balance the tree."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
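    # Illustrative usage (added; not part of the original module): all three coins
    # start at the root, so one move to each leaf balances the tree.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(example_tree))  # 2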
"""Surface area and volume of a regular dodecahedron with a given edge length."""


def dodecahedron_surface_area(edge: float) -> float:
    """Surface area = 3 * sqrt(25 + 10*sqrt(5)) * edge**2."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume = (15 + 7*sqrt(5)) / 4 * edge**3."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
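    # Illustrative usage (added; not part of the original module):
    print(f"{dodecahedron_surface_area(5) = :.2f}")  # ~516.14
    print(f"{dodecahedron_volume(5) = :.2f}")        # ~957.89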
"""Generate the Gray code sequence for a given number of bits."""


def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence as integers.

    >>> gray_code(2)
    [0, 1, 3, 2]
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray code sequence as binary strings, built recursively."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
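    # Illustrative usage (added; not part of the original module): note that
    # consecutive codes differ in exactly one bit.
    print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]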
"""Next Greatest Element (NGE): for each element, the first larger element to
its right, or -1 if none exists."""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the NGE for each element with a nested index loop, O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow, but using enumerate and slices.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element using a monotonic stack, O(n).

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # pop everything not greater than the current element
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
"""Tests for the VisionTextDualEncoderProcessor (tokenizer + image processor)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random channel-first numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""Convert official CvT checkpoints to the Hugging Face Transformers format."""
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Map the patch-embedding weights of stage `idx` to their original names."""
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Map the attention/MLP/norm weights of block `cnt` in stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Map the classification token of the final stage."""
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    """Map the final layernorm and classifier head weights."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetch the ImageNet label map, build the config, remap the original weights,
    and save the converted model together with its image processor."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input image size.",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
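
# Example invocation (illustrative; the script file name is an assumption):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted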
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
A__ : List[str] = HfApi()
A__ : List[str] = {}
# fmt: off
A__ : Optional[int] = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
A__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
A__ : Optional[Any] = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
A__ : Tuple = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
A__ : Optional[int] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
A__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
A__ : List[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
A__ : List[Any] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
A__ : Union[str, Any] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
A__ : str = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
A__ : Any = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
A__ : List[Any] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
A__ : Optional[int] = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
A__ : str = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
A__ : List[str] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
A__ : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
A__ : Optional[int] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
A__ : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
A__ : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
A__ : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
A__ : Union[str, Any] = torch.tensor([1_0] * noise.shape[0])
with torch.no_grad():
A__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
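        # Hedged note: the ``results`` lookup above turns a repo id such as
        # "google/ddpm-celebahq-256" into the key "google_ddpm_celebahq_256",
        # matching the naming scheme of the expected-slice tensors defined above.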
print(F"""{mod.modelId} has passed successfully!!!""")
| 0 |
'''simple docstring'''
from __future__ import annotations
A__ : List[Any] = list[list[int]]
# assigning initial values to the grid
A__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
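# Hedged example (assuming the mangled ``a_`` above is ``is_safe``): on
# ``initial_grid`` a 3 cannot go at (0, 1) because row 0 already holds a 3,
# while a 1 passes the row, column and 3x3-box checks:
#   is_safe(initial_grid, 0, 1, 3)  ->  False
#   is_safe(initial_grid, 0, 1, 1)  ->  True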
def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None:
if location := find_empty_location(_UpperCAmelCase ):
__snake_case , __snake_case : Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
__snake_case : Union[str, Any] = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
__snake_case : Optional[Any] = 0
return None
def a_ ( _UpperCAmelCase : Matrix ) -> None:
for row in grid:
for cell in row:
print(_UpperCAmelCase ,end=' ' )
print()
if __name__ == "__main__":
    # solve each example grid in place, printing the puzzle before the solution
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
A__ : List[str] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''lxmert'''
A__ = {}
def __init__( self : Optional[int] , __a : List[str]=30522 , __a : int=768 , __a : List[str]=12 , __a : int=9500 , __a : Optional[Any]=1600 , __a : str=400 , __a : str=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[str]=0.1 , __a : Tuple=512 , __a : List[Any]=2 , __a : List[Any]=0.0_2 , __a : Union[str, Any]=1e-12 , __a : Optional[Any]=9 , __a : Union[str, Any]=5 , __a : List[Any]=5 , __a : Optional[Any]=2048 , __a : int=4 , __a : List[Any]=6.6_7 , __a : Optional[int]=True , __a : Tuple=True , __a : Dict=True , __a : Tuple=True , __a : Optional[Any]=True , __a : Optional[int]=True , __a : int=True , **__a : Dict , ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Union[str, Any] = hidden_act
__snake_case : Tuple = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : Optional[int] = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : List[Any] = num_qa_labels
__snake_case : List[str] = num_object_labels
__snake_case : Any = num_attr_labels
__snake_case : str = l_layers
__snake_case : List[str] = x_layers
__snake_case : int = r_layers
__snake_case : Optional[Any] = visual_feat_dim
__snake_case : Any = visual_pos_dim
__snake_case : List[Any] = visual_loss_normalizer
__snake_case : int = task_matched
__snake_case : str = task_mask_lm
__snake_case : int = task_obj_predict
__snake_case : Optional[Any] = task_qa
__snake_case : Tuple = visual_obj_loss
__snake_case : Any = visual_attr_loss
__snake_case : Any = visual_feat_loss
__snake_case : List[str] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**__a )
| 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = KandinskyVaaPriorPipeline
A__ = ['''prompt''']
A__ = ['''prompt''', '''negative_prompt''']
A__ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : Any ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__snake_case : List[Any] = PriorTransformer(**__a )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it doesn't
__snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a )
return model
@property
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : List[str] = self.dummy_image_encoder
__snake_case : str = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : List[str] = self.dummy_image_processor
__snake_case : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , )
__snake_case : str = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case : str = 'cpu'
__snake_case : List[str] = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : List[str] = output.image_embeds
__snake_case : str = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : Union[str, Any] = image[0, -10:]
__snake_case : Any = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__snake_case : List[Any] = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = torch_device == 'cpu'
__snake_case : Dict = True
__snake_case : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , )
@skip_mps
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = torch_device == 'cpu'
__snake_case : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__a , test_mean_pixel_difference=__a , )
| 0 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int]=0.9_9_9 ,_UpperCAmelCase : Dict="cosine" ,) -> Tuple:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : Union[str, Any] ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__snake_case : Optional[Any] = []
for i in range(_UpperCAmelCase ):
__snake_case : int = i / num_diffusion_timesteps
__snake_case : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) ,_UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase ,dtype=torch.floataa )
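# Hedged, self-contained sketch of the cosine schedule above; ``_demo_betas``
# is an illustrative name, not part of this module.
def _demo_betas(n: int, max_beta: float = 0.999) -> list[float]:
    import math
    def bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta
    return [min(1 - bar((i + 1) / n) / bar(i / n), max_beta) for i in range(n)]
# _demo_betas(4) yields four monotonically increasing betas below the 0.999 cap.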
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = [e.name for e in KarrasDiffusionSchedulers]
A__ = 2
@register_to_config
def __init__( self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_0_0_8_5 , __a : float = 0.0_1_2 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : str = "linspace" , __a : int = 0 , ) -> Optional[Any]:
'''simple docstring'''
if trained_betas is not None:
__snake_case : Dict = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
__snake_case : List[str] = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case : int = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case : Tuple = betas_for_alpha_bar(__a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__snake_case : List[Any] = 1.0 - self.betas
__snake_case : List[str] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
def A_ ( self : List[Any] , __a : Optional[int] , __a : Union[str, Any]=None ) -> Dict:
'''simple docstring'''
if schedule_timesteps is None:
__snake_case : List[Any] = self.timesteps
__snake_case : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__snake_case : List[str] = 1 if len(__a ) > 1 else 0
else:
__snake_case : List[str] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
__snake_case : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A_ ( self : str , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
'''simple docstring'''
__snake_case : Optional[int] = self.index_for_timestep(__a )
if self.state_in_first_order:
__snake_case : Optional[int] = self.sigmas[step_index]
else:
__snake_case : Any = self.sigmas_interpol[step_index]
__snake_case : Tuple = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A_ ( self : Dict , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ) -> Dict:
'''simple docstring'''
__snake_case : Tuple = num_inference_steps
__snake_case : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case : Optional[int] = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__snake_case : str = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case : int = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__snake_case : int = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__snake_case : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__snake_case : Union[str, Any] = torch.from_numpy(np.log(__a ) ).to(__a )
__snake_case : int = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
__snake_case : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__snake_case : Tuple = torch.from_numpy(__a ).to(device=__a )
# interpolate sigmas
__snake_case : Optional[Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__snake_case : Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__snake_case : Optional[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__a ).startswith('mps' ):
# mps does not support float64
__snake_case : Dict = torch.from_numpy(__a ).to(__a , dtype=torch.floataa )
else:
__snake_case : Optional[int] = torch.from_numpy(__a ).to(__a )
# interpolate timesteps
__snake_case : List[str] = self.sigma_to_t(__a ).to(__a , dtype=timesteps.dtype )
__snake_case : Optional[int] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__snake_case : Any = torch.cat([timesteps[:1], interleaved_timesteps] )
__snake_case : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case : str = defaultdict(__a )
def A_ ( self : int , __a : Any ) -> Any:
'''simple docstring'''
# get log sigma
__snake_case : str = sigma.log()
# get distribution
__snake_case : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__snake_case : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__snake_case : List[str] = low_idx + 1
__snake_case : Any = self.log_sigmas[low_idx]
__snake_case : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
__snake_case : int = (low - log_sigma) / (low - high)
__snake_case : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
__snake_case : List[Any] = (1 - w) * low_idx + w * high_idx
__snake_case : int = t.view(sigma.shape )
return t
@property
def A_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return self.sample is None
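    # Hedged note: KDPM2 alternates a first-order step (``self.sample`` is None)
    # with a second-order correction that reuses the stored sample, which is why
    # the sigma and timestep schedules above are interleaved two-per-step.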
def A_ ( self : Dict , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
__snake_case : Any = self.index_for_timestep(__a )
# advance index counter by 1
__snake_case : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case : str = self.sigmas[step_index]
__snake_case : Optional[int] = self.sigmas_interpol[step_index + 1]
__snake_case : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__snake_case : Union[str, Any] = self.sigmas[step_index - 1]
__snake_case : Optional[int] = self.sigmas_interpol[step_index]
__snake_case : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case : List[str] = 0
__snake_case : str = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case : List[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case : Optional[int] = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case : str = sigma_interpol - sigma_hat
# store for 2nd order step
__snake_case : str = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__snake_case : Tuple = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__snake_case : Optional[int] = sigma_next - sigma_hat
__snake_case : List[Any] = self.sample
__snake_case : Union[str, Any] = None
__snake_case : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def A_ ( self : List[str] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ) -> torch.FloatTensor:
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
__snake_case : str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__snake_case : str = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__snake_case : Optional[Any] = self.timesteps.to(original_samples.device )
__snake_case : str = timesteps.to(original_samples.device )
__snake_case : int = [self.index_for_timestep(__a , __a ) for t in timesteps]
__snake_case : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__snake_case : Optional[int] = sigma.unsqueeze(-1 )
__snake_case : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 0 |
'''simple docstring'''
from math import factorial
A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def a_ ( _UpperCAmelCase : int ) -> int:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) )
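# Hedged, self-contained check of the digit-factorial sum used above:
# 145 is a factorion, since 1! + 4! + 5! == 145.
assert sum(factorial(int(_d)) for _d in "145") == 145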
def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
__snake_case : List[str] = 0
# the cached sizes of the previous chains
__snake_case : dict[int, int] = {}
for start_chain_element in range(1 ,_UpperCAmelCase ):
# The temporary set will contain the elements of the chain
__snake_case : Optional[int] = set()
__snake_case : List[Any] = 0
# Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
__snake_case : str = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_UpperCAmelCase )
chain_set_length += 1
__snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
__snake_case : Optional[Any] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 0 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Tuple = logging.get_logger(__name__)
A__ : Union[str, Any] = {'''tokenizer_file''': '''tokenizer.json'''}
A__ : int = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = ['''input_ids''', '''attention_mask''']
A__ = None
def __init__( self : str , __a : Any=None , __a : List[Any]=None , __a : Any=None , __a : List[str]="<unk>" , __a : Union[str, Any]="<s>" , __a : Union[str, Any]="</s>" , __a : str="<pad>" , __a : Optional[int]=False , __a : Optional[int]=False , **__a : str , ) -> List[str]:
'''simple docstring'''
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , )
__snake_case : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __a ) != add_prefix_space:
__snake_case : Tuple = getattr(__a , pre_tok_state.pop('type' ) )
__snake_case : Union[str, Any] = add_prefix_space
__snake_case : str = pre_tok_class(**__a )
__snake_case : List[str] = add_prefix_space
def A_ ( self : Any , *__a : int , **__a : int ) -> BatchEncoding:
'''simple docstring'''
__snake_case : str = kwargs.get('is_split_into_words' , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._batch_encode_plus(*__a , **__a )
def A_ ( self : Dict , *__a : List[str] , **__a : Optional[int] ) -> BatchEncoding:
'''simple docstring'''
__snake_case : Optional[Any] = kwargs.get('is_split_into_words' , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._encode_plus(*__a , **__a )
def A_ ( self : List[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case : Any = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def A_ ( self : str , __a : "Conversation" ) -> List[int]:
'''simple docstring'''
__snake_case : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
__snake_case : Tuple = input_ids[-self.model_max_length :]
return input_ids
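    # Hedged note: each conversation turn is encoded with a trailing EOS token,
    # then the concatenation is left-truncated to ``model_max_length`` so the
    # most recent turns are kept.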
| 0 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int = 1_00 ) -> int:
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
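# Hedged, self-contained check of the two closed forms above for n = 10:
# square of sum = 55**2 = 3025, sum of squares = 385, difference = 2640.
assert (10 * 11 // 2) ** 2 - 10 * 11 * 21 // 6 == 2640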
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ) -> tuple[float, float, float]:
__snake_case : List[Any] = point_y / 4 / point_x
__snake_case : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__snake_case : List[str] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__snake_case : Optional[int] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__snake_case : Optional[int] = outgoing_gradient**2 + 4
__snake_case : Optional[Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__snake_case : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
__snake_case : Any = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__snake_case : Tuple = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__snake_case : Optional[Any] = x_minus if isclose(_UpperCAmelCase ,_UpperCAmelCase ) else x_plus
__snake_case : Any = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
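# Hedged note on the trigonometry above: with m = y / (4x) the gradient of the
# ellipse normal, ``sa`` and ``ca`` are sin(2θ) and cos(2θ) for tan(θ) = m, so
# the outgoing gradient is the incoming one reflected about that normal.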
def a_ ( _UpperCAmelCase : float = 1.4 ,_UpperCAmelCase : float = -9.6 ) -> int:
__snake_case : int = 0
__snake_case : float = first_x_coord
__snake_case : float = first_y_coord
__snake_case : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
__snake_case , __snake_case , __snake_case : int = next_point(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : int = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
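# Hedged note: ``_LazyModule`` defers the torch/TF submodule imports declared
# above until first attribute access; the ``TYPE_CHECKING`` branch keeps static
# type checkers and IDEs aware of the full public surface.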
| 0 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> int:
__snake_case : Tuple = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
__snake_case : List[Any] = Dataset.from_dict(_UpperCAmelCase )
return dataset
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = get_dataset()
__snake_case : str = make_duplicate_clusters(__a , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def A_ ( self : Dict ) -> str:
'''simple docstring'''
__snake_case : List[str] = get_dataset()
__snake_case , __snake_case : List[str] = deduplicate_dataset(__a )
self.assertEqual(len(__a ) , 2 )
print(__a )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , __a )
| 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ShapEPipeline
A__ = ['''prompt''']
A__ = ['''prompt''']
A__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return 8
@property
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Optional[Any] = PriorTransformer(**__a )
return model
@property
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Optional[int] = ShapERenderer(**__a )
return model
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : Union[str, Any] = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : Optional[Any] = self.dummy_renderer
__snake_case : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__snake_case : int = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : Optional[int] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**__a )
__snake_case : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Dict = output.images[0]
__snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : str = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
        # NOTE: larger batch sizes cause this test to time out, so only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : int ) -> Tuple:
'''simple docstring'''
__snake_case : int = torch_device == 'cpu'
__snake_case : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : str = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Dict = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : int = 1
__snake_case : Tuple = 2
__snake_case : Tuple = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe(
'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 0 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class snake_case__ :
A__ = None
def A_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : str = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __a )
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Dict = os.path.join(__a , 'feat_extract.json' )
feat_extract_first.to_json_file(__a )
__snake_case : List[str] = self.feature_extraction_class.from_json_file(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = feat_extract_first.save_pretrained(__a )[0]
check_json_file_has_correct_format(__a )
__snake_case : Tuple = self.feature_extraction_class.from_pretrained(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = self.feature_extraction_class()
self.assertIsNotNone(__a )
| 0 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
A__ : str = [8, 5, 9, 7]
A__ : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A__ : Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None:
'''simple docstring'''
__snake_case : int = claim_vector
__snake_case : Optional[int] = allocated_resources_table
__snake_case : List[str] = maximum_claim_table
def A_ ( self : str ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def A_ ( self : int ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def A_ ( self : int ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def A_ ( self : str ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__a ): i for i in self.__need()}
def A_ ( self : Union[str, Any] , **__a : int ) -> None:
'''simple docstring'''
__snake_case : str = self.__need()
__snake_case : List[Any] = self.__allocated_resources_table
__snake_case : Optional[int] = self.__available_resources()
__snake_case : Union[str, Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__snake_case : Tuple = False
for each_need in need_list:
__snake_case : Any = True
for index, need in enumerate(__a ):
if need > available_resources[index]:
__snake_case : List[str] = False
break
if execution:
__snake_case : Union[str, Any] = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__snake_case : str = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(__a )
# update available/freed resources stack
__snake_case : Union[str, Any] = np.array(__a ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__a ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(__a ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(__a ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__a ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
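    # Hedged usage sketch (assuming the mangled ``snake_case__`` above is a
    # ``BankersAlgorithm``-style class and ``A_`` its entry point): instantiate
    # it with the three module-level tables and pass any keyword set to True
    # (e.g. ``describe=True``) to print the resource tables before scheduling.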
| 0 | 1 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
A__ : Dict = get_logger(__name__)
class snake_case__ :
def __init__( self : Union[str, Any] , __a : int , __a : Tuple=None ) -> int:
'''simple docstring'''
__snake_case : List[Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , __a , getattr(__a , __a ) )
__snake_case : Optional[int] = module._original_module if isinstance(__a , _PatchedModuleObj ) else module
class snake_case__ :
A__ = []
def __init__( self : List[str] , __a : str , __a : str , __a : Any , __a : Optional[int]=None ) -> Any:
'''simple docstring'''
__snake_case : Optional[Any] = obj
__snake_case : Tuple = target
__snake_case : List[str] = new
__snake_case : int = target.split('.' )[0]
__snake_case : Dict = {}
__snake_case : Union[str, Any] = attrs or []
def __enter__( self : List[str] ) -> int:
'''simple docstring'''
*__snake_case , __snake_case : Union[str, Any] = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__a ) ):
try:
__snake_case : Union[str, Any] = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__snake_case : Any = getattr(self.obj , __a )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__snake_case : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) )
__snake_case : Optional[int] = getattr(self.obj , __a )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) )
__snake_case : Dict = getattr(__a , __a )
# finally set the target attribute
setattr(__a , __a , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__snake_case : str = getattr(import_module('.'.join(__a ) ) , __a )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __a ) is attr_value:
__snake_case : List[str] = getattr(self.obj , __a )
setattr(self.obj , __a , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
__snake_case : Union[str, Any] = globals()['__builtins__'][target_attr]
setattr(self.obj , __a , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self : int , *__a : Optional[int] ) -> str:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , __a , self.original.pop(__a ) )
def A_ ( self : Any ) -> Dict:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
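# Hedged usage sketch (assuming this mirrors ``datasets``' ``patch_submodule``):
# with patch_submodule(calling_module, "os.path.join", mock_join):
#     ...  # lookups of os.path.join (or a bare imported ``join``) hit mock_join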
| 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : List[Any] = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
A__ : List[Any] = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
A__ : Optional[Any] = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_INIT_CONFIGURATION
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ElectraTokenizer
def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
__snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __a ) != do_lower_case
or normalizer_state.get('strip_accents' , __a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars
):
__snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) )
__snake_case : str = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : Any = tokenize_chinese_chars
__snake_case : Union[str, Any] = normalizer_class(**__a )
__snake_case : Any = do_lower_case
def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
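    # Hedged example: for a sequence pair this yields 0s over ``[CLS] A [SEP]``
    # and 1s over ``B [SEP]``, i.e. BERT-style token type ids.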
def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case : Tuple = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
| 0 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
A__ : Optional[Any] = '''CompVis/stable-diffusion-v1-1'''
A__ : int = '''CompVis/stable-diffusion-v1-2'''
A__ : List[str] = '''CompVis/stable-diffusion-v1-3'''
A__ : List[Any] = '''CompVis/stable-diffusion-v1-4'''
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , __a : bool = True , ) -> int:
'''simple docstring'''
        super().__init__()
__snake_case : List[str] = StableDiffusionPipeline.from_pretrained(__a )
__snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(__a )
__snake_case : Dict = StableDiffusionPipeline.from_pretrained(__a )
__snake_case : Optional[Any] = StableDiffusionPipeline(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , requires_safety_checker=__a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A_ ( self : Optional[int] ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , __a ) for k in self.config.keys() if not k.startswith('_' )}
def A_ ( self : Tuple , __a : Optional[Union[str, int]] = "auto" ) -> Any:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def A_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
self.enable_attention_slicing(__a )
    @torch.no_grad()
    def text2img_sd1_1(self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2(self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3(self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4(self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__(self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
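# A hedged usage sketch for the comparison pipeline above (model downloads are
# large; the custom_pipeline id is an assumption based on the diffusers
# community-pipelines naming, not confirmed by this file):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   pipe.enable_attention_slicing()
#   out = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   images = out.images  # one image per checkpoint, v1.1 through v1.4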
| 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # round the floating-point estimate so exact cubes are not rejected
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
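# Why the rounding above matters: a naive float comparison can reject exact
# cubes, e.g. 64 ** (1 / 3) evaluates to 3.9999999999999996 on most platforms:
#
#   val = 64 ** (1 / 3)
#   val * val * val == 64     # False with the raw float estimate
#   round(val) ** 3 == 64     # True once rounded to the nearest integer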
| 0 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0])))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i])))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            )))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
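# A single-process reference implementation of the same algorithm, handy for
# checking the parallel version above (a sketch, not part of the original file):
def odd_even_transposition_sequential(arr: list) -> list:
    data = list(arr)
    for phase in range(len(data)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(data) - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
    return data
assert odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]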
| 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        '''simple docstring'''
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index(self):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays(self):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization(self):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores, examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index(self):
        '''simple docstring'''
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index(self):
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory(self):
        '''simple docstring'''
        import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            _ = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom(self):
        '''simple docstring'''
        import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization(self):
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
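# A minimal end-to-end sketch of the FaissIndex API exercised by these tests
# (illustrative; assumes faiss and datasets are installed):
#
#   import numpy as np
#   from datasets.search import FaissIndex
#
#   index = FaissIndex(string_factory="Flat")
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.ones(5, dtype=np.float32))
#   index.save("my.faiss")
#   restored = FaissIndex.load("my.faiss")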
| 0 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        '''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10(self):
        '''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
    def test_get_activation(self):
        '''simple docstring'''
        get_activation('gelu' )
        get_activation('gelu_10' )
        get_activation('gelu_fast' )
        get_activation('gelu_new' )
        get_activation('gelu_python' )
        get_activation('gelu_pytorch_tanh' )
        get_activation('linear' )
        get_activation('mish' )
        get_activation('quick_gelu' )
        get_activation('relu' )
        get_activation('sigmoid' )
        get_activation('silu' )
        get_activation('swish' )
        get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects(self):
        '''simple docstring'''
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
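# A quick sketch of what "gelu_10" means in the tests above: it matches gelu but
# clips activations at 10.0 (assumes transformers is installed):
#
#   import torch
#   from transformers.activations import get_activation
#
#   x = torch.tensor([-100.0, 0.0, 5.0, 100.0])
#   get_activation("gelu")(x)     # ~[-0., 0., 5., 100.]
#   get_activation("gelu_10")(x)  # same values, capped at 10.0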
| 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config(PretrainedConfig):
    model_type = '''t5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__(self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
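# A small sketch of how `feed_forward_proj` is parsed above (assumes transformers
# is installed; values illustrative):
#
#   from transformers import T5Config
#
#   cfg = T5Config(feed_forward_proj="gated-gelu")
#   cfg.is_gated_act    # True
#   cfg.dense_act_fn    # "gelu_new" (backwards-compatibility remap)
#   T5Config(feed_forward_proj="gated-gated-gelu")  # raises ValueError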
| 0 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'''{class_data_dir}/images''' , exist_ok=True )
    if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' , total=num_class_images )
    with open(f'''{class_data_dir}/caption.txt''' , 'w' ) as f1, open(f'''{class_data_dir}/urls.txt''' , 'w' ) as f2, open(
        f'''{class_data_dir}/images.txt''' , 'w' ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )  # validate the payload is an image
                    with open(f'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
                        f.write(img.content )
                    f1.write(images['caption'] + '\n' )
                    f2.write(images['url'] + '\n' )
                    f3.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
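# A hedged invocation sketch for the script above (script name and paths are
# illustrative, not from the original file):
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog \
#       --num_class_images 200
#
# Images land in {class_data_dir}/images alongside caption.txt / urls.txt /
# images.txt manifests written in parallel.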
| 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
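# A sketch of the validation above in action (assumes a transformers build with
# Llama support; values illustrative):
#
#   from transformers import LlamaConfig
#
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
#   LlamaConfig(rope_scaling={"factor": 2.0})                    # ValueError: needs both fields
#   LlamaConfig(rope_scaling={"type": "bad", "factor": 2.0})     # ValueError: unknown type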
| 0 | 1 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
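# Example behaviour of the two-ended recursive search above (a quick sketch):
#
#   search([1, 5, 7, 20, 30], 30)  # -> 4, matched at the right end
#   search([1, 5, 7, 20, 30], 7)   # -> 2, found after two recursive steps
#   search([1, 5, 7, 20, 30], 6)   # -> -1, absent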
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
'''simple docstring'''
from __future__ import annotations
__author__ = '''Muhammad Umer Farooq'''
__license__ = '''MIT'''
__version__ = '''1.0.0'''
__maintainer__ = '''Muhammad Umer Farooq'''
__email__ = '''[email protected]'''
__status__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self , domain : str ) -> None:
        '''simple docstring'''
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
        '''simple docstring'''
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url ).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
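# What the two URL helpers above return, for reference (a sketch):
#
#   get_sub_domain_name("https://sub.example.com/path")  # -> "sub.example.com"
#   get_domain_name("https://sub.example.com/path")      # -> "example.com"
#
# emails_from_url then keeps only addresses ending in that registered domain.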
| 0 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir , mode ) -> List[InputExample]:
        '''simple docstring'''
        raise NotImplementedError
    @staticmethod
    def get_labels(path: str ) -> List[str]:
        '''simple docstring'''
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(examples: List[InputExample] , label_list: List[str] , max_seq_length: int , tokenizer: PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
        '''simple docstring'''
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d' , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***' )
                logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s' , ' '.join([str(x ) for x in tokens] ) )
                logger.info('input_ids: %s' , ' '.join([str(x ) for x in input_ids] ) )
                logger.info('input_mask: %s' , ' '.join([str(x ) for x in input_mask] ) )
                logger.info('segment_ids: %s' , ' '.join([str(x ) for x in segment_ids] ) )
                logger.info('label_ids: %s' , ' '.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
            '''simple docstring'''
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''' )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(f'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , cached_features_file )
        def __len__(self ):
            '''simple docstring'''
            return len(self.features )
        def __getitem__(self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
            '''simple docstring'''
            examples = token_classification_task.read_examples_from_file(data_dir , mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64) , (
                        {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64) , (
                        {
                            'input_ids': tf.TensorShape([None] ),
                            'attention_mask': tf.TensorShape([None] ),
                            'token_type_ids': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )
        def get_dataset(self ):
            '''simple docstring'''
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
        def __len__(self ):
            '''simple docstring'''
            return len(self.features )
        def __getitem__(self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
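# A hedged driver sketch for the PyTorch dataset above (the concrete NER task
# class and paths are assumptions following the token-classification examples):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   labels = ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"]
#   dataset = TokenClassificationDataset(
#       token_classification_task=NER(), data_dir="./data", tokenizer=tokenizer,
#       labels=labels, model_type="bert", max_seq_length=128, mode=Split.train,
#   )
#   features = dataset[0]  # an InputFeatures instance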
| 351 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_glue_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'glue_no_trainer' ) ) )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_clm_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['perplexity'] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'clm_no_trainer' ) ) )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_mlm_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['perplexity'] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'mlm_no_trainer' ) ) )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_ner_no_trainer(self):
        '''simple docstring'''
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
        self.assertLess(result['train_loss'] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'ner_no_trainer' ) ) )
    @unittest.skip(reason='Fix me @muellerzr' )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_squad_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'] , 28 )
        self.assertGreaterEqual(result['eval_exact'] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'qa_no_trainer' ) ) )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_swag_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'swag_no_trainer' ) ) )
    @slow
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_summarization_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_rouge1'] , 10 )
        self.assertGreaterEqual(result['eval_rouge2'] , 2 )
        self.assertGreaterEqual(result['eval_rougeL'] , 7 )
        self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'summarization_no_trainer' ) ) )
    @slow
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_translation_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_bleu'] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'translation_no_trainer' ) ) )
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_image_classification_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'step_1' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'image_classification_no_trainer' ) ) )
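# All of these tests share one launch pattern; a condensed sketch (paths and
# hyper-parameters illustrative):
#
#   testargs = f'''
#       {examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
#       --model_name_or_path distilbert-base-uncased
#       --output_dir {tmp_dir}
#   '''.split()
#   run_command(["accelerate", "launch", "--config_file", config_path] + testargs)
#   results = get_results(tmp_dir)  # parsed from {tmp_dir}/all_results.json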
| 0 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = '''bridgetower_vision_model'''
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('model_type' ) == "bridgetower":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = '''bridgetower_text_model'''
    def __init__(self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('model_type' ) == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig(PretrainedConfig):
    model_type = '''bridgetower'''
    def __init__(self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        '''simple docstring'''
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop('text_config_dict' , None )
        _ = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs(cls , text_config: BridgeTowerTextConfig , vision_config: BridgeTowerVisionConfig , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 352 |
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
__snake_case : Optional[Any] = [True] * n
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__snake_case : Optional[int] = i * 2
while index < n:
__snake_case : Union[str, Any] = False
__snake_case : int = index + i
__snake_case : Dict = [2]
for i in range(3 ,_UpperCAmelCase ,2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
__snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case : Tuple = prime_sieve(_UpperCAmelCase )
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : Optional[int] = primes[prime_index + 1]
__snake_case : Union[str, Any] = last_prime**2
__snake_case : Dict = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Dict = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
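# Hedged sanity check (comment-only: the sieve above is defined under the masked
# name `a_`; the later call site `prime_sieve(...)` reveals its original name).
# With that name restored, the sieve behaves like:
#     prime_sieve(30)  ->  [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]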
| 0 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Optional[int] = logging.get_logger(__name__)
A__ : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A__ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
A__ : Dict = {'''facebook/blenderbot-3B''': 1_2_8}
class snake_case__ ( a__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['''input_ids''', '''attention_mask''']
A__ = BlenderbotTokenizer
def __init__( self : Dict , __a : List[str]=None , __a : Tuple=None , __a : Dict=None , __a : List[str]="replace" , __a : str="<s>" , __a : List[str]="</s>" , __a : Optional[int]="</s>" , __a : List[str]="<s>" , __a : Optional[Any]="<unk>" , __a : str="<pad>" , __a : Optional[int]="<mask>" , __a : Dict=False , __a : Union[str, Any]=True , **__a : List[Any] , ) -> int:
'''simple docstring'''
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
__snake_case : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space:
__snake_case : str = getattr(_lowerCamelCase , pre_tok_state.pop('type' ) )
__snake_case : Any = add_prefix_space
__snake_case : Optional[Any] = pre_tok_class(**_lowerCamelCase )
__snake_case : Tuple = add_prefix_space
__snake_case : int = '''post_processor'''
__snake_case : Union[str, Any] = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
__snake_case : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Dict = tuple(state['sep'] )
if "cls" in state:
__snake_case : Dict = tuple(state['cls'] )
__snake_case : Optional[int] = False
if state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space:
__snake_case : Any = add_prefix_space
__snake_case : Union[str, Any] = True
if state.get('trim_offsets' , _lowerCamelCase ) != trim_offsets:
__snake_case : Dict = trim_offsets
__snake_case : List[Any] = True
if changes_to_apply:
__snake_case : int = getattr(_lowerCamelCase , state.pop('type' ) )
__snake_case : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self : Optional[Any] , __a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
__snake_case : str = value
def A_ ( self : int , *__a : str , **__a : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[str] = kwargs.get('is_split_into_words' , _lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def A_ ( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Any = kwargs.get('is_split_into_words' , _lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def A_ ( self : List[str] , __a : str , __a : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def A_ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> str:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[str]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def A_ ( self : List[Any] , __a : "Conversation" ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within blenderbot
inputs.append(' ' + text )
else:
                # Generated responses already contain the space prefix.
inputs.append(_lowerCamelCase )
__snake_case : Optional[int] = ''' '''.join(_lowerCamelCase )
__snake_case : List[str] = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
__snake_case : int = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
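# Hedged usage sketch (not part of the original sample): the class above mirrors
# `BlenderbotTokenizerFast` from `transformers`, whose public checkpoint can be
# exercised like this; note that `build_inputs_with_special_tokens` above appends
# only the EOS token.
#
#     from transformers import BlenderbotTokenizerFast
#     tok = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B')
#     ids = tok('Hello world').input_ids
#     assert ids[-1] == tok.eos_token_id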
| 353 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class snake_case__ ( lowercase__ , lowercase__ ):
A__ = 1
@register_to_config
def __init__( self : Dict , __a : Dict=2000 , __a : Tuple=0.1 , __a : List[str]=20 , __a : Union[str, Any]=1e-3 ) -> Any:
'''simple docstring'''
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : Any = None
def A_ ( self : List[Any] , __a : int , __a : Union[str, torch.device] = None ) -> Any:
'''simple docstring'''
__snake_case : List[str] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def A_ ( self : List[str] , __a : Dict , __a : Optional[Any] , __a : List[str] , __a : List[Any]=None ) -> Optional[Any]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__snake_case : Any = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__snake_case : List[str] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__snake_case : Dict = std.flatten()
while len(std.shape ) < len(score.shape ):
__snake_case : List[Any] = std.unsqueeze(-1 )
__snake_case : Dict = -score / std
# compute
__snake_case : Optional[Any] = -1.0 / len(self.timesteps )
__snake_case : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__snake_case : Optional[int] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__snake_case : str = beta_t.unsqueeze(-1 )
__snake_case : Optional[Any] = -0.5 * beta_t * x
__snake_case : Tuple = torch.sqrt(_a )
__snake_case : Optional[int] = drift - diffusion**2 * score
__snake_case : Optional[int] = x + drift * dt
# add noise
__snake_case : str = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
__snake_case : Tuple = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : str ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
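# Hedged usage sketch (comment-only: both methods above are masked to the same
# name `A_`, so they cannot be called apart as written). Unmasked, this is a
# variance-preserving SDE scheduler, and a reverse-diffusion loop would look
# roughly like:
#     scheduler.set_timesteps(num_inference_steps, device=device)   # first A_ above
#     for t in scheduler.timesteps:
#         score = model(x, t).sample
#         x, x_mean = scheduler.step_pred(score, t, x, generator=g)  # second A_ above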
| 354 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : Union[str, Any] = 'tf'
def A_ ( self : Dict , __a : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : Any , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(__a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
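        # TensorFlow not in environment -> use PyTorch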
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Optional[Any] = MagicMock(return_value=__a )
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : str = MagicMock(return_value=__a )
__snake_case : List[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
| 0 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a_ ( _UpperCAmelCase : Optional[int] ) -> int:
return 1 / (1 + np.exp(-z ))
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> Dict:
return (-y * np.log(__snake_case ) - (1 - y) * np.log(1 - h )).mean()
def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ) -> Optional[int]:
__snake_case : Union[str, Any] = np.dot(__snake_case ,__snake_case )
return np.sum(y * scores - np.log(1 + np.exp(__snake_case ) ) )
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Dict=7_00_00 ) -> List[str]:
__snake_case : List[Any] = np.zeros(x.shape[1] )
for iterations in range(__snake_case ):
__snake_case : Tuple = np.dot(__snake_case ,__snake_case )
__snake_case : str = sigmoid_function(__snake_case )
__snake_case : Tuple = np.dot(x.T ,h - y ) / y.size
__snake_case : Union[str, Any] = theta - alpha * gradient # updating the weights
__snake_case : int = np.dot(__snake_case ,__snake_case )
__snake_case : Any = sigmoid_function(__snake_case )
__snake_case : Optional[Any] = cost_function(__snake_case ,__snake_case )
if iterations % 1_00 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
A__ : List[str] = datasets.load_iris()
A__ : int = iris.data[:, :2]
A__ : Tuple = (iris.target != 0) * 1
A__ : Union[str, Any] = 0.1
A__ : Dict = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def a_ ( _UpperCAmelCase : Optional[Any] ) -> Dict:
return sigmoid_function(
np.dot(__snake_case ,__snake_case ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
(A__) , (A__) : Dict = (x[:, 0].min(), x[:, 0].max())
(A__) , (A__) : List[str] = (x[:, 1].min(), x[:, 1].max())
(A__) , (A__) : Any = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
A__ : int = np.c_[xxa.ravel(), xxa.ravel()]
A__ : Optional[Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
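# Hedged sanity notes (comment-only: the helpers above are defined under masked
# names). With the original names restored:
#     sigmoid_function(0.0)  ->  0.5
#     cost_function(h=[0.999, 0.001], y=[1, 0])  ->  ~0.001, i.e. near-zero loss
#     for near-perfect predictions, since -y*log(h) - (1 - y)*log(1 - h) -> 0.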
| 355 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ProphetNetTokenizer
A__ = False
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : int , __a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = 'UNwant\u00E9d,running'
__snake_case : List[str] = 'unwanted, running'
return input_text, output_text
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class(self.vocab_file )
__snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A_ ( self : int ) -> Any:
'''simple docstring'''
__snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__snake_case : List[Any] = {}
for i, token in enumerate(__a ):
__snake_case : List[str] = i
__snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' )
self.assertIsInstance(__a , __a )
__snake_case : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 0 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A__ : Dict = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class snake_case__ ( unittest.TestCase ):
A__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def A_ ( self : int , __a : List[Any] , __a : Union[str, Any] , __a : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : str = ZeroShotClassificationPipeline(
model=_snake_case , tokenizer=_snake_case , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def A_ ( self : Tuple , __a : Any , __a : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case )], 'scores': [ANY(_snake_case )]} )
# No kwarg
__snake_case : List[Any] = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case )], 'scores': [ANY(_snake_case )]} )
__snake_case : Union[str, Any] = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case )], 'scores': [ANY(_snake_case )]} )
__snake_case : str = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case ), ANY(_snake_case )], 'scores': [ANY(_snake_case ), ANY(_snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__snake_case : str = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case ), ANY(_snake_case )], 'scores': [ANY(_snake_case ), ANY(_snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__snake_case : Optional[Any] = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(_snake_case , {'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case )], 'scores': [ANY(_snake_case )]} )
# https://github.com/huggingface/transformers/issues/13846
__snake_case : Union[str, Any] = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
_snake_case , [
{'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case ), ANY(_snake_case )], 'scores': [ANY(_snake_case ), ANY(_snake_case )]}
for i in range(1 )
] , )
__snake_case : int = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
_snake_case , [
{'sequence': ANY(_snake_case ), 'labels': [ANY(_snake_case ), ANY(_snake_case )], 'scores': [ANY(_snake_case ), ANY(_snake_case )]}
for i in range(2 )
] , )
with self.assertRaises(_snake_case ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(_snake_case ):
classifier(_snake_case , candidate_labels='politics' )
with self.assertRaises(_snake_case ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(_snake_case ):
classifier('Who are you voting for in 2020?' , candidate_labels=_snake_case )
with self.assertRaises(_snake_case ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(_snake_case ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=_snake_case , )
self.run_entailment_id(_snake_case )
def A_ ( self : Optional[Any] , __a : Pipeline ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = zero_shot_classifier.model.config
__snake_case : Any = config.labelaid
__snake_case : str = zero_shot_classifier.entailment_id
__snake_case : Tuple = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__snake_case : str = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : Union[str, Any] = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : int = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__snake_case : Tuple = original_labelaid
self.assertEqual(_snake_case , zero_shot_classifier.entailment_id )
@require_torch
def A_ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : str = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
__snake_case : List[str] = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
__snake_case : str = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def A_ ( self : Dict ) -> Any:
'''simple docstring'''
__snake_case : int = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
__snake_case : List[str] = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
__snake_case : str = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def A_ ( self : Optional[int] ) -> int:
'''simple docstring'''
__snake_case : Dict = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
__snake_case : List[str] = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
__snake_case : int = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A__ : Any = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ['DPTFeatureExtractor']
A__ : Any = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
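# Hedged usage note: the `_LazyModule` indirection above means that, unmasked,
#     from transformers.models.dpt import DPTConfig
# resolves the symbol through `_import_structure` on first access, so heavy
# submodules such as `modeling_dpt` are imported only when actually used.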
| 357 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ) -> list:
    # bit_count is the number of bits in each Gray code word
    if bit_count < 0:
        raise ValueError('The given input must be non-negative' )
# get the generated string sequence
__snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase )
    # convert them to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case : Optional[Any] = int(sequence[i] ,2 )
return sequence
def a_ ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case : Dict = 1 << bit_count # defines the length of the sequence
    # 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case : Dict = gray_code_sequence_string(bit_count - 1 )
__snake_case : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case : str = '0' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case : Any = '1' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
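# Hedged example (comment-only: both functions above share the masked name `a_`).
# With the original names restored, the two-bit reflected Gray code is:
#     gray_code_sequence_string(2)  ->  ['00', '01', '11', '10']
#     gray_code_sequence(2)         ->  [0, 1, 3, 2]
# Consecutive entries differ in exactly one bit.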
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
class snake_case__ :
def __init__( self : Dict , __a : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(__lowercase ) != 0:
__snake_case : Optional[Any] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__lowercase ) != cols:
raise error
for value in row:
if not isinstance(__lowercase , (int, float) ):
raise error
__snake_case : int = rows
else:
__snake_case : Optional[Any] = []
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return len(self.rows )
@property
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return len(self.rows[0] )
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.order[0] == self.order[1]
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__lowercase )
def A_ ( self : Dict ) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return bool(self.determinant() )
def A_ ( self : Any , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : str = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__lowercase ).determinant()
def A_ ( self : Union[str, Any] , __a : str , __a : List[str] ) -> List[str]:
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(__lowercase , __lowercase )
return -1 * self.get_minor(__lowercase , __lowercase )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return Matrix(
[
[self.get_minor(__lowercase , __lowercase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__lowercase )
def A_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return str(self.rows )
def __str__( self : str ) -> Union[str, Any]:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(__lowercase ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def A_ ( self : int , __a : List[str] , __a : Union[str, Any] = None ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(__lowercase , __lowercase ):
raise type_error
for value in row:
if not isinstance(__lowercase , (int, float) ):
raise type_error
if len(__lowercase ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(__lowercase )
else:
__snake_case : Dict = self.rows[0:position] + [row] + self.rows[position:]
def A_ ( self : Optional[Any] , __a : int , __a : Dict = None ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(__lowercase , __lowercase ):
raise type_error
for value in column:
if not isinstance(__lowercase , (int, float) ):
raise type_error
if len(__lowercase ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
__snake_case : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__snake_case : Any = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Any , __a : Any ) -> Any:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Dict , __a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return not self == other
def __neg__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return self * -1
def __add__( self : Tuple , __a : int ) -> Union[str, Any]:
'''simple docstring'''
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Dict , __a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Union[str, Any] , __a : Dict ) -> Optional[int]:
'''simple docstring'''
if isinstance(__lowercase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__lowercase , __lowercase ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(__lowercase , __lowercase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : List[Any] , __a : int ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
                'Only invertible matrices can be raised to a negative power' )
__snake_case : Union[str, Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def A_ ( cls : List[Any] , __a : Optional[Any] , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(__lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
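# Hedged usage sketch (comment-only: the masked parameter names above, e.g. the
# repeated `__a`, make the class unrunnable as written). With the original names
# restored, typical usage is:
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()                      # -2
#     (m ** 2).rows                        # [[7, 10], [15, 22]]
#     Matrix([[1, 1], [1, 1]]).inverse()   # raises TypeError (zero determinant)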
| 358 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
# fmt: off
__snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : List[str] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[str] = image_processor(__a , return_tensors='np' )
__snake_case : List[str] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[int] = 'lower newer'
__snake_case : Dict = processor(text=__a )
__snake_case : List[Any] = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : List[Any] = 'lower newer'
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(__a ):
processor()
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(__a )
__snake_case : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : list[list[int]] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : set ) -> Any:
    __snake_case , __snake_case : Tuple = len(__lowerCamelCase ), len(grid[0] )
if (
min(__lowerCamelCase ,__lowerCamelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__snake_case : str = 0
count += depth_first_search(__lowerCamelCase ,row + 1 ,__lowerCamelCase ,__lowerCamelCase )
count += depth_first_search(__lowerCamelCase ,row - 1 ,__lowerCamelCase ,__lowerCamelCase )
count += depth_first_search(__lowerCamelCase ,__lowerCamelCase ,col + 1 ,__lowerCamelCase )
count += depth_first_search(__lowerCamelCase ,__lowerCamelCase ,col - 1 ,__lowerCamelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
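    # Hedged usage sketch (the names above are mangled; this assumes the function
    # is depth_first_search(grid, row, col, visit)): count the simple paths from
    # the top-left to the bottom-right of a 3x3 grid whose centre cell is blocked.
    demo_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(demo_grid, 0, 0, set()))  # expected output: 2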
| 359 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple:
__snake_case : str = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]:
__snake_case : Tuple = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict:
__snake_case : Union[str, Any] = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def a_ ( ) -> Optional[Any]:
__snake_case : Any = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple:
__snake_case : List[str] = 'imagenet-1k-id2label.json'
__snake_case : Dict = 10_00
__snake_case : Union[str, Any] = 'huggingface/label-files'
__snake_case : str = num_labels
__snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) )
__snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[Any] = idalabel
__snake_case : str = {v: k for k, v in idalabel.items()}
__snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13":
__snake_case : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21":
__snake_case : str = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__snake_case : Dict = [2, 2, 20]
__snake_case : Any = [3, 12, 16]
__snake_case : Tuple = [1_92, 7_68, 10_24]
__snake_case : str = CvtForImageClassification(_UpperCAmelCase )
__snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
__snake_case : int = image_size
__snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) )
__snake_case : List[Any] = OrderedDict()
__snake_case : Union[str, Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase )
__snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase )
__snake_case : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__snake_case : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
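    # Minimal remap sketch (hypothetical key and tensor, not part of the script):
    # each HuggingFace key receives the tensor stored under its original
    # counterpart, exactly as the loop over list_of_state_dict does above.
    demo_pairs = [(
        'cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight',
        'stage0.patch_embed.proj.weight',
    )]
    demo_weights = {'stage0.patch_embed.proj.weight': torch.zeros(64, 3, 7, 7)}
    demo_state = OrderedDict((hf, demo_weights[orig]) for hf, orig in demo_pairs)
    print(list(demo_state.keys()))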
| 0 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
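# Note on the pattern above: each try/except guard probes an optional backend and,
# when it is missing, imports dummy placeholder objects instead, so the import
# itself succeeds and a helpful error is raised only when a scheduler is used.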
| 360 |
'''simple docstring'''
from __future__ import annotations
A__ : List[Any] = list[list[int]]
# assigning initial values to the grid
A__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
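# Worked example for the box check above: for row=4, column=7 the box origin is
# (4 - 4 % 3, 7 - 7 % 3) == (3, 6), so the inner loops scan rows 3-5 and
# columns 6-8, i.e. the middle-right 3x3 box.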
def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None:
if location := find_empty_location(_UpperCAmelCase ):
__snake_case , __snake_case : Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
__snake_case : Union[str, Any] = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
__snake_case : Optional[Any] = 0
return None
def a_ ( _UpperCAmelCase : Matrix ) -> None:
for row in grid:
for cell in row:
print(_UpperCAmelCase ,end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
A__ : List[str] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
A__ : int = None
A__ : int = logging.get_logger(__name__)
A__ : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Optional[Any] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
A__ : List[Any] = {
'''facebook/nllb-large-en-ro''': 1_0_2_4,
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
A__ : List[str] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = ['''input_ids''', '''attention_mask''']
A__ = NllbTokenizer
A__ = []
A__ = []
def __init__( self : Optional[Any] , __a : Any=None , __a : List[str]=None , __a : Dict="<s>" , __a : int="</s>" , __a : Dict="</s>" , __a : Any="<s>" , __a : int="<unk>" , __a : List[str]="<pad>" , __a : Dict="<mask>" , __a : List[str]=None , __a : List[Any]=None , __a : str=None , __a : List[str]=False , **__a : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it
__snake_case : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__snake_case : Tuple = legacy_behaviour
super().__init__(
vocab_file=__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , legacy_behaviour=__a , **__a , )
__snake_case : int = vocab_file
__snake_case : Any = False if not self.vocab_file else True
__snake_case : Any = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__snake_case : List[Any] = {
lang_code: self.convert_tokens_to_ids(__a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
__snake_case : List[Any] = self.convert_tokens_to_ids(self._src_lang )
__snake_case : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A_ ( self : Optional[Any] , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Union[str, Any] , __a : List[Any] , __a : Optional[Any] = None ) -> Dict:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : List[str] , __a : List[str] , __a : List[Any] = None ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : Tuple , __a : List[str] , __a : Any , __a : Dict , __a : Union[str, Any] , **__a : List[str] ) -> str:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__snake_case : Dict = src_lang
__snake_case : Optional[int] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
__snake_case : Tuple = self.convert_tokens_to_ids(__a )
__snake_case : Optional[int] = tgt_lang_id
return inputs
def A_ ( self : List[str] , __a : Dict , __a : str = "eng_Latn" , __a : List[Any] = None , __a : List[Any] = "fra_Latn" , **__a : List[Any] , ) -> int:
'''simple docstring'''
__snake_case : str = src_lang
__snake_case : Dict = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : Dict , __a : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Dict = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
__snake_case : Optional[Any] = []
__snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__snake_case : Tuple = [self.cur_lang_code]
__snake_case : str = [self.eos_token_id]
__snake_case : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : int = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
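    # Resulting layouts implied by the branches above (a reading, not a spec):
    #   legacy_behaviour=True :  <tokens> ... </s> <lang_code>   (both in the suffix)
    #   legacy_behaviour=False:  <lang_code> <tokens> ... </s>   (code in the prefix)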
def A_ ( self : List[str] , __a : Dict ) -> Dict:
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
__snake_case : List[str] = []
__snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__snake_case : Union[str, Any] = [self.cur_lang_code]
__snake_case : Optional[Any] = [self.eos_token_id]
__snake_case : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Dict , __a : Tuple , __a : List[Any] = None ) -> List[Any]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__snake_case : int = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
| 361 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = KandinskyVaaPriorPipeline
A__ = ['''prompt''']
A__ = ['''prompt''', '''negative_prompt''']
A__ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : Any ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__snake_case : List[Any] = PriorTransformer(**__a )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it won't
__snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a )
return model
@property
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : List[str] = self.dummy_image_encoder
__snake_case : str = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : List[str] = self.dummy_image_processor
__snake_case : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , )
__snake_case : str = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case : str = 'cpu'
__snake_case : List[str] = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : List[str] = output.image_embeds
__snake_case : str = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : Union[str, Any] = image[0, -10:]
__snake_case : Any = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__snake_case : List[Any] = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = torch_device == 'cpu'
__snake_case : Dict = True
__snake_case : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , )
@skip_mps
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = torch_device == 'cpu'
__snake_case : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__a , test_mean_pixel_difference=__a , )
| 0 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : str = tempfile.mkdtemp()
__snake_case : List[Any] = BlipImageProcessor()
__snake_case : Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
__snake_case : List[Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
__snake_case : int = InstructBlipProcessor(__a , __a , __a )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : int , **__a : Any ) -> str:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).tokenizer
def A_ ( self : Any , **__a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def A_ ( self : Union[str, Any] , **__a : Tuple ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).qformer_tokenizer
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Dict = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__snake_case : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
self.assertIsInstance(processor.qformer_tokenizer , __a )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : str = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_qformer_tokenizer()
__snake_case : Optional[Any] = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__snake_case : int = self.prepare_image_inputs()
__snake_case : Dict = image_processor(__a , return_tensors='np' )
__snake_case : List[Any] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : int ) -> str:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Any = self.get_qformer_tokenizer()
__snake_case : List[Any] = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__snake_case : List[Any] = """lower newer"""
__snake_case : Tuple = processor(text=__a )
__snake_case : Tuple = tokenizer(__a , return_token_type_ids=__a )
__snake_case : List[str] = qformer_tokenizer(__a , return_token_type_ids=__a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_qformer_tokenizer()
__snake_case : Tuple = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__snake_case : List[str] = """lower newer"""
__snake_case : Optional[int] = self.prepare_image_inputs()
__snake_case : int = processor(text=__a , images=__a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : int = self.get_qformer_tokenizer()
__snake_case : List[str] = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Optional[Any] = processor.batch_decode(__a )
__snake_case : int = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def A_ ( self : Dict ) -> str:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : int = self.get_qformer_tokenizer()
__snake_case : int = InstructBlipProcessor(
tokenizer=__a , image_processor=__a , qformer_tokenizer=__a )
__snake_case : Tuple = """lower newer"""
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Dict = processor(text=__a , images=__a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 362 |
'''simple docstring'''
from math import factorial
A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def a_ ( _UpperCAmelCase : int ) -> int:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
    # Convert the number to a string to iterate over its digits and sum each digit's factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) )
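# Worked example: digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# so 145 maps back to itself and forms a factorial chain of length 1.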
def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
__snake_case : List[str] = 0
# the cached sizes of the previous chains
__snake_case : dict[int, int] = {}
for start_chain_element in range(1 ,_UpperCAmelCase ):
# The temporary set will contain the elements of the chain
__snake_case : Optional[int] = set()
__snake_case : List[Any] = 0
        # Stop computing the chain when we find a cached size, a repeating item, or
        # the length is greater than the desired one.
__snake_case : str = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_UpperCAmelCase )
chain_set_length += 1
__snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
__snake_case : Optional[Any] = chain_set_length
        # If the chain contains exactly the desired number of elements, increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
__snake_case : int = AutoTokenizer.from_pretrained('google/mt5-small' )
__snake_case : Optional[int] = tokenizer('Hello there' , return_tensors='tf' ).input_ids
__snake_case : Optional[Any] = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
__snake_case : Tuple = model(lowercase_ , labels=lowercase_ ).loss
__snake_case : List[Any] = -tf.math.reduce_mean(lowercase_ ).numpy()
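        # The score is the negative mean per-token loss, i.e. the average
        # log-likelihood of the reference tokens under the model.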
__snake_case : str = -2_1.2_2_8_1_6_8
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 ) | 363 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int = 1_00 ) -> int:
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
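# Worked example for n = 10: the sum of squares is 385, the square of the sum is
# 55**2 == 3025, and the difference is 3025 - 385 == 2640.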
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Tuple = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : int = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : int = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ShapEPipeline
A__ = ['''prompt''']
A__ = ['''prompt''']
A__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return 8
@property
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Optional[Any] = PriorTransformer(**__a )
return model
@property
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Optional[int] = ShapERenderer(**__a )
return model
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : Union[str, Any] = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : Optional[Any] = self.dummy_renderer
__snake_case : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__snake_case : int = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : Optional[int] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**__a )
__snake_case : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Dict = output.images[0]
__snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : str = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : int ) -> Tuple:
'''simple docstring'''
__snake_case : int = torch_device == 'cpu'
__snake_case : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : str = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Dict = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : int = 1
__snake_case : Tuple = 2
__snake_case : Tuple = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe(
'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 0 | 0 |
'''simple docstring'''
import base64
def base64_encode(string: str) -> bytes:
    # b85encode operates on bytes, so UTF-8 encode the text first
    return base64.b85encode(string.encode('utf-8'))
def base64_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode('utf-8')
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 366 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
A__ : str = [8, 5, 9, 7]
A__ : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A__ : Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
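# Example data for the Banker's algorithm: the claim vector is the total amount
# of each resource type the system owns, the allocation table is what each
# process currently holds, and the maximum claim table is the most each process
# may ever request.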
class snake_case__ :
def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None:
'''simple docstring'''
__snake_case : int = claim_vector
__snake_case : Optional[int] = allocated_resources_table
__snake_case : List[str] = maximum_claim_table
def A_ ( self : str ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def A_ ( self : int ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def A_ ( self : int ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def A_ ( self : str ) -> dict[int, list[int]]:
'''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
def A_ ( self : Union[str, Any] , **__a : int ) -> None:
'''simple docstring'''
__snake_case : str = self.__need()
__snake_case : List[Any] = self.__allocated_resources_table
__snake_case : Optional[int] = self.__available_resources()
__snake_case : Union[str, Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
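        # Safety algorithm: repeatedly find a process whose remaining need fits
        # in the currently available resources, "run" it, reclaim its allocation,
        # and retry; if a full pass finds no runnable process, the state is unsafe.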
while need_list:
__snake_case : Tuple = False
for each_need in need_list:
__snake_case : Any = True
for index, need in enumerate(__a ):
if need > available_resources[index]:
__snake_case : List[str] = False
break
if execution:
__snake_case : Union[str, Any] = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__snake_case : str = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(__a )
# update available/freed resources stack
__snake_case : Union[str, Any] = np.array(__a ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__a ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(__a ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(__a ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__a ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Dict = BlipImageProcessor()
__snake_case : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
__snake_case : Optional[int] = BlipProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : List[str] , **__a : str ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).tokenizer
def A_ ( self : List[Any] , **__a : Optional[int] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor
def A_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
__snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
__snake_case : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Any = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Union[str, Any] = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Tuple = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='np' )
__snake_case : List[str] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = 'lower newer'
__snake_case : str = processor(text=_SCREAMING_SNAKE_CASE )
__snake_case : Union[str, Any] = tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : str = self.get_tokenizer()
__snake_case : List[Any] = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = 'lower newer'
__snake_case : Union[str, Any] = self.prepare_image_inputs()
__snake_case : Dict = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Any = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Dict = processor.batch_decode(_SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Any = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = 'lower newer'
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[Any] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 367 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : List[Any] = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
A__ : List[Any] = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
A__ : Optional[Any] = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_INIT_CONFIGURATION
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ElectraTokenizer
def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
__snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __a ) != do_lower_case
or normalizer_state.get('strip_accents' , __a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars
):
__snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) )
__snake_case : str = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : Any = tokenize_chinese_chars
__snake_case : Union[str, Any] = normalizer_class(**__a )
__snake_case : Any = do_lower_case
    def A_ ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        # pairs are encoded as [CLS] A [SEP] B [SEP]; the two id lists must stay distinct
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def A_ ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        # token type ids are 0 for the first segment (with its [CLS]/[SEP]) and 1 for the second
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case : Tuple = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
| 0 | 0 |
'''simple docstring'''
from torch import nn
class snake_case__ ( nn.Module ):
def __init__( self : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__snake_case : List[Any] = class_size
__snake_case : Optional[Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__snake_case : Union[str, Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
def A_ ( self : Optional[int] , __a : str ) -> List[Any]:
'''simple docstring'''
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
__snake_case : str = self.mlp(_lowerCamelCase )
return logits
| 368 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # n ** (1 / 3) is a float and rarely exact (27 ** (1 / 3) == 3.0000000000000004),
    # so round to the nearest integer before cubing
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
    print(perfect_cube(2_7))
    print(perfect_cube(4))
| 0 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
A__ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class snake_case__ ( __UpperCamelCase ):
def __init__( self : List[str] , **__a : str ) -> Any:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : Dict , __a : Union[np.ndarray, bytes, str] , **__a : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def A_ ( self : Tuple , **__a : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = {}
if "candidate_labels" in kwargs:
__snake_case : List[str] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__snake_case : str = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def A_ ( self : Optional[int] , __a : Tuple , __a : List[Any]=None , __a : Optional[Any]="This is a sound of {}." ) -> List[Any]:
'''simple docstring'''
        if isinstance(audio , str ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__snake_case : int = requests.get(_lowerCAmelCase ).content
else:
with open(_lowerCAmelCase , 'rb' ) as f:
__snake_case : Dict = f.read()
        if isinstance(audio , bytes ):
__snake_case : Union[str, Any] = ffmpeg_read(_lowerCAmelCase , self.feature_extractor.sampling_rate )
if not isinstance(_lowerCAmelCase , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
__snake_case : Dict = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
__snake_case : Any = candidate_labels
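        # build one text prompt per candidate label (e.g. "This is a sound of dog.")
        # so the model can score the audio against each label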
__snake_case : int = [hypothesis_template.format(_lowerCAmelCase ) for x in candidate_labels]
__snake_case : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework , padding=_lowerCAmelCase )
__snake_case : str = [text_inputs]
return inputs
def A_ ( self : Any , __a : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = model_inputs.pop('candidate_labels' )
__snake_case : Tuple = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
__snake_case : Optional[Any] = text_inputs[0]
else:
# Batching case.
__snake_case : int = text_inputs[0][0]
__snake_case : int = self.model(**_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : str = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def A_ ( self : Dict , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = model_outputs.pop('candidate_labels' )
__snake_case : Optional[Any] = model_outputs["""logits"""][0]
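        # one similarity score per candidate label; softmax over the label axis
        # converts them into class probabilities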
if self.framework == "pt":
__snake_case : Optional[int] = logits.softmax(dim=0 )
__snake_case : Any = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
__snake_case : int = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(_lowerCAmelCase , _lowerCAmelCase ) , key=lambda x : -x[0] )
]
return result
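# A minimal usage sketch (the checkpoint name is illustrative, not pinned by this file):
#   from transformers import pipeline
#   classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
#   classifier('dog_bark.wav', candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'])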
| 369 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Tuple = pytest.mark.integration
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
__snake_case : Dict = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a )
__snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
__snake_case : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : Any = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__snake_case : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__a )
__snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : str ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
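        # with METRIC_INNER_PRODUCT, a one-hot query scores 1.0 against the
        # matching basis vector added below and 0.0 against the rest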
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__snake_case : Dict = np.zeros(5 , dtype=np.floataa )
__snake_case : List[str] = 1
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__snake_case , __snake_case : Dict = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def A_ ( self : int ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__snake_case : List[str] = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
__snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Tuple = faiss.IndexFlat(5 )
__snake_case : List[Any] = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
import faiss
__snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
index.save(tmp_file.name )
__snake_case : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__snake_case : List[Any] = np.zeros(5 , dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : int = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( _UpperCAmelCase : str ) -> Optional[int]:
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
__snake_case : Dict = 'index.faiss'
__snake_case : Any = f'''mock://{index_name}'''
index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = np.zeros(5 ,dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : int = Elasticsearch()
__snake_case : Dict = {'acknowledged': True}
__snake_case : List[Any] = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__snake_case : Optional[Any] = 'foo'
__snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__snake_case : Dict = 'foo'
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__snake_case : List[Any] = ['foo', 'bar', 'foobar']
__snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : Any = index.search_batch(__a )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
__snake_case : Tuple = ['foo', 'bar', 'foobar']
__snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
| 0 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ ( _UpperCAmelCase ):
def A_ ( self : List[str] ):
'''simple docstring'''
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase_ , 'depth_multiplier' ) )
class snake_case__ :
def __init__( self : List[str] , __a : List[Any] , __a : int=13 , __a : List[str]=3 , __a : Tuple=32 , __a : List[Any]=0.2_5 , __a : int=8 , __a : Any=8 , __a : Tuple=6 , __a : Union[str, Any]=32 , __a : Tuple=True , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : Optional[int]="relu6" , __a : Dict=1280 , __a : Any=0.1 , __a : Any=0.0_2 , __a : List[Any]=True , __a : List[str]=True , __a : List[str]=10 , __a : Optional[Any]=None , ):
'''simple docstring'''
__snake_case : Optional[int] = parent
__snake_case : Any = batch_size
__snake_case : List[str] = num_channels
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = depth_multiplier
__snake_case : Dict = depth_divisible_by
__snake_case : Optional[Any] = min_depth
__snake_case : Dict = expand_ratio
__snake_case : str = tf_padding
__snake_case : List[str] = output_stride
__snake_case : List[str] = first_layer_is_expansion
__snake_case : List[str] = finegrained_output
__snake_case : Optional[Any] = hidden_act
__snake_case : Any = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__snake_case : List[str] = classifier_dropout_prob
__snake_case : List[Any] = use_labels
__snake_case : int = is_training
__snake_case : Tuple = num_labels
__snake_case : Any = initializer_range
__snake_case : Dict = scope
def A_ ( self : Tuple ):
'''simple docstring'''
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
__snake_case : List[str] = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self : int , __a : List[str] , __a : List[Any] , __a : Union[str, Any] , __a : Tuple ):
'''simple docstring'''
__snake_case : Union[str, Any] = MobileNetVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A_ ( self : Optional[int] , __a : Tuple , __a : List[str] , __a : List[Any] , __a : List[Any] ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : int = MobileNetVaForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : int = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : List[Any] , __a : int , __a : Optional[int] , __a : Union[str, Any] , __a : int ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : Optional[int] = MobileNetVaForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : str = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : Dict = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A_ ( self : int ):
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case : Tuple = config_and_inputs
__snake_case : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : Optional[int] ):
'''simple docstring'''
__snake_case : Tuple = MobileNetVaModelTester(self )
__snake_case : Optional[int] = MobileNetVaConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def A_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A_ ( self : List[str] ):
'''simple docstring'''
pass
def A_ ( self : List[str] ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(lowercase_ )
__snake_case : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(__a : List[Any] , __a : List[str] , __a : int ):
__snake_case : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
__snake_case : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
__snake_case : Optional[int] = outputs.hidden_states
__snake_case : List[Any] = 16
self.assertEqual(len(lowercase_ ) , lowercase_ )
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def A_ ( self : Any ):
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def A_ ( self : Optional[int] ):
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@slow
def A_ ( self : Dict ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = MobileNetVaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def a_ ( ) -> Tuple:
__snake_case : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def A_ ( self : str ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A_ ( self : Optional[int] ):
'''simple docstring'''
__snake_case : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase_ )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : Optional[Any] = prepare_img()
__snake_case : Dict = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
__snake_case : Optional[int] = model(**lowercase_ )
# verify the logits
__snake_case : Optional[Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , lowercase_ )
__snake_case : List[str] = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
def A_ ( self : int ):
'''simple docstring'''
__snake_case : Any = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
__snake_case : List[str] = model.to(lowercase_ )
__snake_case : Optional[Any] = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
__snake_case : Optional[Any] = prepare_img()
__snake_case : str = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**lowercase_ )
__snake_case : List[Any] = outputs.logits
# verify the logits
__snake_case : List[Any] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase_ )
__snake_case : List[Any] = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
] , device=lowercase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) )
| 370 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : Tuple = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''t5'''
A__ = ['''past_key_values''']
A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = vocab_size
__snake_case : str = d_model
__snake_case : str = d_kv
__snake_case : List[Any] = d_ff
__snake_case : List[str] = num_layers
__snake_case : Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__snake_case : Union[str, Any] = num_heads
__snake_case : Tuple = relative_attention_num_buckets
__snake_case : Optional[int] = relative_attention_max_distance
__snake_case : Optional[Any] = dropout_rate
__snake_case : str = layer_norm_epsilon
__snake_case : List[str] = initializer_factor
__snake_case : int = feed_forward_proj
__snake_case : Optional[Any] = use_cache
__snake_case : Optional[Any] = self.feed_forward_proj.split('-' )
__snake_case : Dict = act_info[-1]
__snake_case : List[str] = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__snake_case : Dict = 'gelu_new'
super().__init__(
pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
@property
def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__snake_case : Union[str, Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
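            # with cached past key values the decoder consumes one token per step,
            # so only its batch axis stays dynamic; the attention masks still span
            # past + current positions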
__snake_case : Tuple = 'past_encoder_sequence + sequence'
__snake_case : Dict = {0: 'batch'}
__snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'}
__snake_case : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
return 13
| 0 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : str = logging.get_logger(__name__)
A__ : Optional[Any] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case__ ( __lowercase ):
A__ = '''unispeech'''
def __init__( self : Any , __a : Tuple=32 , __a : Any=768 , __a : List[str]=12 , __a : Optional[Any]=12 , __a : Tuple=3072 , __a : Dict="gelu" , __a : Optional[Any]=0.1 , __a : int=0.1 , __a : List[str]=0.1 , __a : Any=0.0 , __a : Tuple=0.0 , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : Optional[int]=0.0_2 , __a : str=1e-5 , __a : List[Any]="group" , __a : Optional[Any]="gelu" , __a : str=(512, 512, 512, 512, 512, 512, 512) , __a : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __a : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , __a : List[str]=False , __a : List[str]=128 , __a : Optional[Any]=16 , __a : Union[str, Any]=False , __a : Tuple=True , __a : Dict=0.0_5 , __a : int=10 , __a : Optional[int]=2 , __a : Any=0.0 , __a : str=10 , __a : Optional[Any]=0 , __a : Any=320 , __a : Any=2 , __a : str=0.1 , __a : List[str]=100 , __a : str=256 , __a : str=256 , __a : List[str]=0.1 , __a : str="mean" , __a : List[Any]=False , __a : Tuple=False , __a : Any=256 , __a : List[Any]=80 , __a : List[str]=0 , __a : List[Any]=1 , __a : Union[str, Any]=2 , __a : List[Any]=0.5 , **__a : Any , ) -> Dict:
'''simple docstring'''
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
__snake_case : Optional[int] = hidden_size
__snake_case : List[str] = feat_extract_norm
__snake_case : Union[str, Any] = feat_extract_activation
__snake_case : Any = list(_a )
__snake_case : Optional[int] = list(_a )
__snake_case : Dict = list(_a )
__snake_case : List[str] = conv_bias
__snake_case : int = num_conv_pos_embeddings
__snake_case : List[Any] = num_conv_pos_embedding_groups
__snake_case : List[str] = len(self.conv_dim )
__snake_case : Tuple = num_hidden_layers
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_act
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[Any] = hidden_dropout
__snake_case : Dict = attention_dropout
__snake_case : Tuple = activation_dropout
__snake_case : Any = feat_proj_dropout
__snake_case : Union[str, Any] = final_dropout
__snake_case : Dict = layerdrop
__snake_case : Optional[int] = layer_norm_eps
__snake_case : Union[str, Any] = initializer_range
__snake_case : List[str] = num_ctc_classes
__snake_case : int = vocab_size
__snake_case : List[Any] = do_stable_layer_norm
__snake_case : str = use_weighted_layer_sum
__snake_case : Optional[int] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Optional[Any] = apply_spec_augment
__snake_case : Dict = mask_time_prob
__snake_case : Tuple = mask_time_length
__snake_case : Tuple = mask_time_min_masks
__snake_case : List[Any] = mask_feature_prob
__snake_case : Dict = mask_feature_length
__snake_case : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case : int = num_codevectors_per_group
__snake_case : Optional[Any] = num_codevector_groups
__snake_case : str = contrastive_logits_temperature
__snake_case : Tuple = feat_quantizer_dropout
__snake_case : Optional[Any] = num_negatives
__snake_case : str = codevector_dim
__snake_case : Tuple = proj_codevector_dim
__snake_case : Optional[Any] = diversity_loss_weight
# ctc loss
__snake_case : List[Any] = ctc_loss_reduction
__snake_case : Any = ctc_zero_infinity
# pretraining loss
__snake_case : Any = replace_prob
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
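        # total downsampling factor of the feature encoder = product of all conv strides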
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = {}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''llama'''
A__ = ['''past_key_values''']
def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = vocab_size
__snake_case : List[str] = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[Any] = num_key_value_heads
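        # fewer key/value heads than attention heads enables grouped-query
        # attention; equal counts reduce to standard multi-head attention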
__snake_case : int = hidden_act
__snake_case : Any = initializer_range
__snake_case : Any = rms_norm_eps
__snake_case : Union[str, Any] = pretraining_tp
__snake_case : Optional[int] = use_cache
__snake_case : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
__snake_case : Optional[Any] = self.rope_scaling.get('type' , __a )
__snake_case : Tuple = self.rope_scaling.get('factor' , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 0 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class snake_case__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : int , __a : int , __a : int , __a : str , ) -> str:
'''simple docstring'''
super().__init__()
__snake_case : Any = value_function
__snake_case : Union[str, Any] = unet
__snake_case : Tuple = scheduler
__snake_case : Dict = env
__snake_case : Tuple = env.get_dataset()
__snake_case : Optional[Any] = {}
for key in self.data.keys():
try:
__snake_case : Dict = self.data[key].mean()
except: # noqa: E722
pass
__snake_case : Tuple = {}
for key in self.data.keys():
try:
__snake_case : Union[str, Any] = self.data[key].std()
except: # noqa: E722
pass
__snake_case : List[str] = env.observation_space.shape[0]
__snake_case : List[Any] = env.action_space.shape[0]
def A_ ( self : Union[str, Any] , __a : List[str] , __a : List[Any] ) -> Tuple:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def A_ ( self : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def A_ ( self : Optional[Any] , __a : Tuple ) -> Union[str, Any]:
'''simple docstring'''
if type(lowercase_ ) is dict:
return {k: self.to_torch(lowercase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowercase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowercase_ , device=self.unet.device )
def A_ ( self : Union[str, Any] , __a : Tuple , __a : Optional[int] , __a : int ) -> Optional[Any]:
'''simple docstring'''
for key, val in cond.items():
__snake_case : Any = val.clone()
return x_in
def A_ ( self : List[Any] , __a : Dict , __a : Any , __a : List[Any] , __a : Dict ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = x.shape[0]
__snake_case : Any = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
__snake_case : Union[str, Any] = torch.full((batch_size,) , lowercase_ , device=self.unet.device , dtype=torch.long )
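            # gradient guidance: take a few gradient-ascent steps on the value
            # function to nudge the noisy trajectories toward higher return
            # before the denoising update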
for _ in range(lowercase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__snake_case : Tuple = self.value_function(x.permute(0 , 2 , 1 ) , lowercase_ ).sample
__snake_case : Union[str, Any] = torch.autograd.grad([y.sum()] , [x] )[0]
__snake_case : Optional[Any] = self.scheduler._get_variance(lowercase_ )
__snake_case : str = torch.exp(0.5 * posterior_variance )
__snake_case : Optional[Any] = model_std * grad
__snake_case : Union[str, Any] = 0
__snake_case : Optional[Any] = x.detach()
__snake_case : Any = x + scale * grad
__snake_case : List[str] = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
__snake_case : Dict = self.unet(x.permute(0 , 2 , 1 ) , lowercase_ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
__snake_case : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , predict_epsilon=lowercase_ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
__snake_case : int = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
__snake_case : int = self.to_torch(lowercase_ )
return x, y
def __call__( self : Optional[int] , __a : Dict , __a : Tuple=64 , __a : int=32 , __a : List[Any]=2 , __a : Optional[int]=0.1 ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = self.normalize(lowercase_ , 'observations' )
__snake_case : Optional[Any] = obs[None].repeat(lowercase_ , axis=0 )
__snake_case : Union[str, Any] = {0: self.to_torch(lowercase_ )}
__snake_case : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
__snake_case : Tuple = randn_tensor(lowercase_ , device=self.unet.device )
__snake_case : Tuple = self.reset_xa(lowercase_ , lowercase_ , self.action_dim )
__snake_case : List[str] = self.to_torch(lowercase_ )
# run the diffusion process
__snake_case : str = self.run_diffusion(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# sort output trajectories by value
__snake_case : str = y.argsort(0 , descending=lowercase_ ).squeeze()
__snake_case : Any = x[sorted_idx]
__snake_case : Optional[Any] = sorted_values[:, :, : self.action_dim]
__snake_case : Union[str, Any] = actions.detach().cpu().numpy()
__snake_case : Tuple = self.de_normalize(lowercase_ , key='actions' )
# select the action with the highest value
if y is not None:
__snake_case : Union[str, Any] = 0
else:
# if we didn't run value guiding, select a random action
__snake_case : Union[str, Any] = np.random.randint(0 , lowercase_ )
__snake_case : int = denorm_actions[selected_index, 0]
return denorm_actions
| 350 |
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''[email protected]'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , __a : str ) -> None:
'''simple docstring'''
super().__init__()
__snake_case : list[str] = []
__snake_case : Dict = domain
def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just the '#' fragment, process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case : Optional[Any] = parse.urljoin(self.domain , __a )
self.urls.append(__a )
def a_ ( _UpperCAmelCase : str ) -> str:
return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] )
def a_ ( _UpperCAmelCase : str ) -> str:
return parse.urlparse(_UpperCAmelCase ).netloc
def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
__snake_case : List[Any] = get_domain_name(_UpperCAmelCase )
# Initialize the parser
__snake_case : Tuple = Parser(_UpperCAmelCase )
try:
# Open URL
__snake_case : Any = requests.get(_UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case : Dict = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case : List[Any] = requests.get(link )
# Get the valid email.
__snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(valid_emails )
if __name__ == "__main__":
A__ : Tuple = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
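# Hedged worked example (illustrative URL, not from the original source): for
# "https://docs.github.com/en", parse.urlparse(...).netloc gives "docs.github.com",
# and keeping the last two dot-separated labels yields "github.com", which is the
# domain the email regex above is matched against.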
| 0 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A__ : str = logging.get_logger(__name__)
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> List[str]:
__snake_case : List[Any] = WavaVecaForSequenceClassification.from_pretrained(_UpperCAmelCase ,config=_UpperCAmelCase )
__snake_case : Union[str, Any] = downstream_dict['''projector.weight''']
__snake_case : Dict = downstream_dict['''projector.bias''']
__snake_case : Union[str, Any] = downstream_dict['''model.post_net.linear.weight''']
__snake_case : Optional[int] = downstream_dict['''model.post_net.linear.bias''']
return model
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : str ) -> int:
__snake_case : str = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCAmelCase ,config=_UpperCAmelCase )
__snake_case : Dict = downstream_dict['''model.linear.weight''']
__snake_case : Any = downstream_dict['''model.linear.bias''']
return model
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[int] ) -> List[Any]:
__snake_case : int = WavaVecaForXVector.from_pretrained(_UpperCAmelCase ,config=_UpperCAmelCase )
__snake_case : Optional[Any] = downstream_dict['''connector.weight''']
__snake_case : Dict = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__snake_case : List[Any] = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__snake_case : Union[str, Any] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__snake_case : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__snake_case : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__snake_case : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__snake_case : str = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__snake_case : List[str] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> List[str]:
__snake_case : Tuple = torch.load(_UpperCAmelCase ,map_location='cpu' )
__snake_case : Tuple = checkpoint['''Downstream''']
__snake_case : Tuple = WavaVecaConfig.from_pretrained(_UpperCAmelCase )
__snake_case : str = WavaVecaFeatureExtractor.from_pretrained(
_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,do_normalize=_UpperCAmelCase )
__snake_case : Tuple = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__snake_case : str = convert_classification(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
elif arch.endswith('ForAudioFrameClassification' ):
__snake_case : Tuple = convert_diarization(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
elif arch.endswith('ForXVector' ):
__snake_case : Dict = convert_xvector(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__snake_case : Dict = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
A__ : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
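# Hedged CLI sketch (all paths and the base model name below are placeholders, not taken
# from the original source):
#
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model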
| 351 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A__ : Dict = logging.getLogger()
def a_ ( ) -> Tuple:
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('-f' )
__snake_case : Any = parser.parse_args()
return args.f
def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
__snake_case : Tuple = {}
__snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase ,'r' ) as f:
__snake_case : List[str] = json.load(_UpperCAmelCase )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
def a_ ( ) -> Union[str, Any]:
__snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
A__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
@classmethod
def A_ ( cls : Any ) -> List[str]:
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : List[Any] = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__snake_case : Any = 7 if get_gpu_count() > 1 else 2
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : Tuple = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Any = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(__a )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) )
@slow
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
__snake_case : List[str] = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[int] = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
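# Hedged note: each test above shells out with `accelerate launch <example_script> <args>`
# via run_command(self._launch_args + testargs) and then asserts on the metrics the example
# writes to <output_dir>/all_results.json, which get_results reads back as a dict.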
| 0 | 0 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : Optional[int] ) -> int:
if len(collection ) < 2:
return collection
def circle_sort_util(_UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> bool:
__snake_case : Optional[Any] = False
if low == high:
return swapped
__snake_case : Optional[int] = low
__snake_case : Dict = high
while left < right:
if collection[left] > collection[right]:
__snake_case : Optional[int] = (
collection[right],
collection[left],
)
__snake_case : Tuple = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__snake_case : Dict = (
collection[right + 1],
collection[left],
)
__snake_case : int = True
__snake_case : List[Any] = low + int((high - low) / 2 )
__snake_case : Any = circle_sort_util(collection ,low ,mid )
__snake_case : Optional[Any] = circle_sort_util(collection ,mid + 1 ,high )
return swapped or left_swap or right_swap
__snake_case : Any = True
while is_not_sorted is True:
__snake_case : Dict = circle_sort_util(collection ,0 ,len(collection ) - 1 )
return collection
if __name__ == "__main__":
A__ : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
A__ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
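# Hedged worked example: circle_sort([4, 2, 5, 1, 3]) returns [1, 2, 3, 4, 5]. Each pass
# swaps mirrored out-of-order pairs (left/right pointers moving inward), recurses on both
# halves, and the outer loop repeats until a full pass performs no swap.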
| 352 |
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
__snake_case : Optional[Any] = [True] * n
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__snake_case : Optional[int] = i * 2
while index < n:
__snake_case : Union[str, Any] = False
__snake_case : int = index + i
__snake_case : Dict = [2]
for i in range(3 ,_UpperCAmelCase ,2 ):
if is_prime[i]:
primes.append(i )
return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
__snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case : Tuple = prime_sieve(_UpperCAmelCase )
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : Optional[int] = primes[prime_index + 1]
__snake_case : Union[str, Any] = last_prime**2
__snake_case : Dict = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Dict = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
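# Hedged worked example for the helper above: prime_sieve(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; only odd candidates are sieved, with the
# primes list seeded with 2.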
| 0 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __a : Optional[int] , __a : Optional[int]=13 , __a : int=3 , __a : int=224 , __a : Optional[Any]=30 , __a : Dict=400 , __a : Union[str, Any]=True , __a : str=None , __a : Any=True , __a : List[str]=[0.5, 0.5, 0.5] , __a : Tuple=[0.5, 0.5, 0.5] , ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = size if size is not None else {'height': 18, 'width': 18}
__snake_case : Dict = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Tuple = image_size
__snake_case : Optional[int] = min_resolution
__snake_case : List[str] = max_resolution
__snake_case : Any = do_resize
__snake_case : str = size
__snake_case : str = do_normalize
__snake_case : Optional[Any] = image_mean
__snake_case : Dict = image_std
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ViTImageProcessor if is_vision_available() else None
def A_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = EfficientFormerImageProcessorTester(self )
@property
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'image_mean' ) )
self.assertTrue(hasattr(__a , 'image_std' ) )
self.assertTrue(hasattr(__a , 'do_normalize' ) )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
pass
def A_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__snake_case : List[str] = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__snake_case : List[str] = image_processor(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__snake_case : Tuple = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__snake_case : Optional[int] = image_processor(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__snake_case : List[str] = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__snake_case : str = image_processor(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
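# Hedged note: with the 18x18 test configuration above, a single image comes back as
# pixel_values of shape (1, num_channels, 18, 18) whether it arrives as a PIL image,
# a numpy array, or a torch tensor; batched inputs prepend batch_size instead of 1.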
| 353 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def a_ ( _UpperCAmelCase : Callable ) -> Union[str, Any]:
@wraps(fn )
def _inner_fn(*_UpperCAmelCase : Dict ,**_UpperCAmelCase : int ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') ,UserWarning ,)
return fn(*_UpperCAmelCase ,**_UpperCAmelCase )
return _inner_fn
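# Hedged usage sketch (the decorator's exposed name `experimental` is an assumption about
# the upstream library; above it is defined under a placeholder name):
#
# @experimental
# def new_api():
#     return 42
#
# new_api()  # warns: "'new_api' is experimental and might be subject to breaking changes ..."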
| 354 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : Union[str, Any] = 'tf'
def A_ ( self : Dict , __a : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : Any , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(__a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Optional[Any] = MagicMock(return_value=__a )
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : str = MagicMock(return_value=__a )
__snake_case : List[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
| 0 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
A__ : Any = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class snake_case__ ( tr.AbstractTransform ):
def __init__( self : Dict , __a : str = " " ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = sentence_delimiter
def A_ ( self : Any , __a : str ) -> Any:
'''simple docstring'''
return list(__a )
def A_ ( self : List[str] , __a : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = []
for sent_idx, sentence in enumerate(__a ):
chars.extend(self.process_string(__a ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__a ) - 1:
chars.append(self.sentence_delimiter )
return chars
A__ : int = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
A__ : Union[str, Any] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
A__ : List[str] = '''\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'''
A__ : int = '''\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'''
A__ : Dict = '''\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def A_ ( self : Optional[Any] , __a : Union[str, Any] , __a : List[str] , __a : List[Any]=False ) -> int:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__a , __a , truth_transform=__a , hypothesis_transform=__a , )["wer"]
__snake_case : int = 0
__snake_case : Dict = 0
for prediction, reference in zip(__a , __a ):
__snake_case : Union[str, Any] = jiwer.compute_measures(
__a , __a , truth_transform=__a , hypothesis_transform=__a , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
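# Hedged worked example: for reference "abc" vs. prediction "abd" there is one character
# substitution and no deletions or insertions, so CER = (S + D + I) / N = (1 + 0 + 0) / 3,
# i.e. roughly 0.33.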
| 355 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ProphetNetTokenizer
A__ = False
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : int , __a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = 'UNwant\u00E9d,running'
__snake_case : List[str] = 'unwanted, running'
return input_text, output_text
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class(self.vocab_file )
__snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A_ ( self : int ) -> Any:
'''simple docstring'''
__snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__snake_case : List[Any] = {}
for i, token in enumerate(__a ):
__snake_case : List[str] = i
__snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' )
self.assertIsInstance(__a , __a )
__snake_case : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
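# Hedged note: WordPiece (exercised above) greedily matches the longest vocabulary prefix
# and continues with "##"-prefixed pieces, so "unwanted" becomes ["un", "##want", "##ed"],
# while a span with no valid segmentation such as "unwantedX" collapses to ["[UNK]"].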
| 0 | 0 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : List[str] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class snake_case__ ( PretrainedConfig ):
A__ = 'autoformer'
A__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[int] , __a : Optional[int] = None , __a : Optional[int] = None , __a : str = "student_t" , __a : str = "nll" , __a : int = 1 , __a : List[int] = [1, 2, 3, 4, 5, 6, 7] , __a : bool = True , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : Optional[List[int]] = None , __a : Optional[List[int]] = None , __a : int = 64 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : int = 32 , __a : int = 32 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : int = 100 , __a : float = 0.0_2 , __a : bool = True , __a : List[Any]=True , __a : int = 10 , __a : int = 25 , __a : int = 3 , **__a : str , ) -> Dict:
'''simple docstring'''
# time series specific configuration
__snake_case : Optional[Any] = prediction_length
__snake_case : Dict = context_length if context_length is not None else prediction_length
__snake_case : Optional[int] = distribution_output
__snake_case : List[str] = loss
__snake_case : Optional[int] = input_size
__snake_case : str = num_time_features
__snake_case : Tuple = lags_sequence
__snake_case : Optional[int] = scaling
__snake_case : Dict = num_dynamic_real_features
__snake_case : Tuple = num_static_real_features
__snake_case : List[Any] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(cardinality ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__snake_case : Union[str, Any] = cardinality
else:
__snake_case : int = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(embedding_dimension ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__snake_case : Optional[int] = embedding_dimension
else:
__snake_case : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__snake_case : Optional[int] = num_parallel_samples
# Transformer architecture configuration
__snake_case : int = input_size * len(self.lags_sequence ) + self._number_of_features
__snake_case : Tuple = d_model
__snake_case : Optional[int] = encoder_attention_heads
__snake_case : Tuple = decoder_attention_heads
__snake_case : str = encoder_ffn_dim
__snake_case : Any = decoder_ffn_dim
__snake_case : Tuple = encoder_layers
__snake_case : Optional[int] = decoder_layers
__snake_case : Union[str, Any] = dropout
__snake_case : Optional[Any] = attention_dropout
__snake_case : int = activation_dropout
__snake_case : Union[str, Any] = encoder_layerdrop
__snake_case : Union[str, Any] = decoder_layerdrop
__snake_case : List[str] = activation_function
__snake_case : Any = init_std
__snake_case : Tuple = use_cache
# Autoformer
__snake_case : Union[str, Any] = label_length
__snake_case : int = moving_average
__snake_case : Any = autocorrelation_factor
super().__init__(is_encoder_decoder=__a , **__a )
@property
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
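# Hedged worked example for the property above: with embedding_dimension=[2] (one static
# categorical feature), num_dynamic_real_features=0, num_time_features=1,
# num_static_real_features=0 and input_size=1, the encoder input gains
# 2 + 0 + 1 + 0 + 1 * 2 = 5 features on top of the stacked lagged values.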
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase )
# convert them to integers
for i in range(len(sequence ) ):
__snake_case : Optional[Any] = int(sequence[i] ,2 )
return sequence
def a_ ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case : Dict = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case : Dict = gray_code_sequence_string(bit_count - 1 )
__snake_case : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case : str = '0' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case : Any = '1' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
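# Hedged worked example: for bit_count=2 the generated strings are "00", "01", "11", "10",
# i.e. the integer sequence [0, 1, 3, 2]; consecutive Gray codes differ in exactly one bit.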
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Any = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class snake_case__ ( PretrainedConfig ):
A__ = 'luke'
def __init__( self : Any , __a : Optional[Any]=50267 , __a : Optional[int]=500000 , __a : Union[str, Any]=768 , __a : List[str]=256 , __a : Optional[int]=12 , __a : Union[str, Any]=12 , __a : Dict=3072 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : str=0.1 , __a : Optional[Any]=512 , __a : int=2 , __a : Any=0.0_2 , __a : int=1e-12 , __a : int=True , __a : List[str]=None , __a : Optional[int]=1 , __a : Union[str, Any]=0 , __a : str=2 , **__a : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__snake_case : int = vocab_size
__snake_case : List[str] = entity_vocab_size
__snake_case : Dict = hidden_size
__snake_case : List[Any] = entity_emb_size
__snake_case : Dict = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = intermediate_size
__snake_case : int = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Dict = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : List[str] = initializer_range
__snake_case : List[str] = layer_norm_eps
__snake_case : Optional[Any] = use_entity_aware_attention
__snake_case : Tuple = classifier_dropout
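# Hedged usage sketch (the class is shown above under a placeholder name; upstream it is
# LukeConfig): LukeConfig(entity_vocab_size=10_000, entity_emb_size=256) configures an
# entity embedding table smaller than hidden_size, which the model is expected to project
# up to hidden_size internally.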
| 358 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
# fmt: off
__snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : List[str] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
        with self.assertRaises(ValueError ):
processor()
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k: int = 5) -> str:
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
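    # Minimal evaluation sketch (illustrative addition, not from the original file):
    # score the classifier on the held-out split produced by train_test_split above.
    correct = sum(
        classifier(X_train, y_train, classes, point) == classes[target]
        for point, target in zip(X_test, y_test)
    )
    print(f"test accuracy: {correct / len(y_test):.2%}")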
| 359 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 360 |
'''simple docstring'''
from __future__ import annotations
A__ : List[Any] = list[list[int]]
# assigning initial values to the grid
A__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
print()
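

# A small verification helper (illustrative addition, not part of the original file):
# re-checks a filled grid by clearing each cell in turn and asking ``is_safe`` whether
# its value could legally be placed back.
def is_valid_solution(grid: Matrix) -> bool:
    for row in range(9):
        for column in range(9):
            digit = grid[row][column]
            grid[row][column] = 0
            safe = digit != 0 and is_safe(grid, row, column, digit)
            grid[row][column] = digit
            if not safe:
                return False
    return True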
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
A__ : List[str] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 361 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( PipelineTesterMixin , unittest.TestCase ):
A__ = KandinskyVaaPriorPipeline
A__ = ['''prompt''']
A__ = ['''prompt''', '''negative_prompt''']
A__ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : Any ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
@property
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
return model
@property
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
def A_ ( self : str ) -> Dict:
'''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
@skip_mps
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
'''simple docstring'''
        model = TFResNetModel(config=config )
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
'''simple docstring'''
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class snake_case__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A__ = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : int ) -> str:
'''simple docstring'''
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def A_ ( self : List[Any] ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_a )
__snake_case : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : int = [*signature.parameters.keys()]
__snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _a )
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(__a : str , __a : Optional[Any] , __a : List[Any] ):
__snake_case : Optional[Any] = model_class(_a )
__snake_case : Optional[int] = model(**self._prepare_for_class(_a , _a ) )
__snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : Optional[Any] = layer_type
__snake_case : List[str] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_a , _a , _a )
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TFResNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def a_ ( ) -> List[str]:
__snake_case : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__snake_case : List[str] = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : List[str] = image_processor(images=_a , return_tensors='tf' )
# forward pass
__snake_case : Any = model(**_a )
# verify the logits
__snake_case : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
__snake_case : List[str] = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1e-4 ) )
| 362 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
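

# Quick checks (illustrative addition) using facts from the Project Euler 74
# statement: 145 maps to itself (1! + 4! + 5! = 145), and 169 sits in a loop of
# three terms (169 -> 363601 -> 1454 -> 169).
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601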
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias, so the previous row's values stay readable next iteration
        next_row = current_row[:]
    return largest_square_area
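

# Cross-check sketch (illustrative addition): the four implementations should agree;
# a 2x2 all-ones matrix contains a square of side 2.
_sample = [[1, 1], [1, 1]]
assert largest_square_area_in_matrix_top_down_approach(2, 2, _sample) == 2
assert largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, _sample) == 2
assert largest_square_area_in_matrix_bottom_up(2, 2, _sample) == 2
assert largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, _sample) == 2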
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
 | 363 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
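

# Worked example (illustrative addition): for n = 10 the sum of the squares is 385,
# the square of the sum is 55 ** 2 = 3025, and the difference is 3025 - 385 = 2640,
# matching the Project Euler 6 statement.
assert solution(10) == 2640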
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
A__ : str = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a_ ( _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
if isinstance(_UpperCAmelCase ,torch.Tensor ):
return image
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
__snake_case : Tuple = [image]
__snake_case : str = [trans(img.convert('RGB' ) ) for img in image]
__snake_case : Optional[Any] = torch.stack(_UpperCAmelCase )
return image
class snake_case__ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        '''simple docstring'''
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
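        # Worked illustration (added note, not from the original file): with
        # strength=0.8 and num_inference_steps=50, init_timestep = min(40, 50) = 40
        # and t_start = 50 - 40 = 10, so denoising covers the last 40 scheduler
        # timesteps; larger strength means more of the image is re-noised and redrawn.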
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image: Union[torch.FloatTensor, PIL.Image.Image] = None , strength: float = 0.8 , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : int = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
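    # Illustrative note (not part of the original file): with _LazyModule,
    # importing this package stays cheap; the torch/TF submodules listed in
    # _import_structure are only imported on first attribute access, e.g. the
    # first time GroupViTModel is looked up on the module.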
| 0 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
def __init__( self : Any , __a : str , __a : Optional[Any]=16 , __a : Optional[Any]=13 , __a : Any=7 , __a : int=14 , __a : List[str]=10 , __a : Dict=19 , __a : str=5 , __a : List[Any]=4 , __a : Tuple=True , __a : Tuple=16 , __a : Dict=2 , __a : List[str]=4 , __a : int=4 , __a : Tuple="gelu" , __a : int=0.1 , __a : List[str]=0.1 , __a : Tuple=[1, 2, 3, 4, 5] , __a : List[Any]=25 , __a : Any=5 , ) -> List[Any]:
'''simple docstring'''
__snake_case : str = d_model
__snake_case : Union[str, Any] = parent
__snake_case : int = batch_size
__snake_case : Tuple = prediction_length
__snake_case : Union[str, Any] = context_length
__snake_case : Optional[int] = cardinality
__snake_case : List[str] = num_time_features
__snake_case : List[Any] = lags_sequence
__snake_case : List[str] = embedding_dimension
__snake_case : int = is_training
__snake_case : Optional[int] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[Any] = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : int = context_length
__snake_case : List[str] = prediction_length + label_length
__snake_case : Any = label_length
__snake_case : Optional[int] = moving_average
__snake_case : str = autocorrelation_factor
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A_ ( self : Union[str, Any] , __a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = config.context_length + max(config.lags_sequence )
__snake_case : Optional[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__snake_case : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__snake_case : Dict = floats_tensor([self.batch_size, _past_length] )
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__snake_case : Any = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
__snake_case : Any = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Dict = self.get_config()
__snake_case : List[str] = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Dict , __a : Union[str, Any] , __a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
__snake_case : Optional[Any] = model(**lowerCAmelCase__ )
__snake_case : List[str] = outputs.encoder_last_hidden_state
__snake_case : List[str] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
__snake_case : Optional[int] = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = model.create_network_inputs(**lowerCAmelCase__ )
__snake_case , __snake_case : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__snake_case : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__snake_case : int = encoder(inputs_embeds=lowerCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__snake_case : Optional[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__snake_case : Dict = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__snake_case : Optional[int] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__snake_case : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
__snake_case : Union[str, Any] = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
__snake_case : List[Any] = decoder(
trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class snake_case__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A__ = (AutoformerForPrediction,) if is_torch_available() else ()
A__ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def A_ ( self : str ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : str = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
__snake_case , __snake_case : Union[str, Any] = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info['missing_keys'] , [] )
def A_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason='Model has no tokens embeddings' )
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = inspect.signature(getattr(lowerCAmelCase__ , 'forward' ) )
# The main input is the name of the argument after `self`
__snake_case : Union[str, Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )
def A_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(lowerCAmelCase__ )
__snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : int = [*signature.parameters.keys()]
__snake_case : str = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
    def test_attention_outputs( self ) -> None:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester , 'seq_length' , None )
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , seq_length )
        d_model = getattr(self.model_tester , 'd_model' , None )
        num_attention_heads = getattr(self.model_tester , 'num_attention_heads' , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions( self ) -> None:
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename : str = "train-batch.pt" ) -> dict:
    # Download a pre-serialized batch of the tourism-monthly dataset and load it onto the test device.
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' ,filename=filename ,repo_type='dataset' )
    batch = torch.load(file ,map_location=torch_device )
    return batch
# absolute tolerance used by the integration tests below
TOLERANCE = 1e-4
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
    def test_inference_no_head( self ) -> None:
        '''simple docstring'''
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ) -> None:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ) -> None:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
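# Illustrative follow-up (a sketch, not part of the original test): `generate`
# returns `num_parallel_samples` sample paths per series, so besides the mean
# checked above, quantile forecasts can be derived from the same tensor:
#
#     samples = outputs.sequences  # (batch, num_parallel_samples, prediction_length)
#     quantiles = samples.quantile(torch.tensor([0.1, 0.5, 0.9]), dim=1)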
| 365 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size( self ) -> int:
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ) -> int:
        '''simple docstring'''
        return 32
    @property
    def time_embed_dim( self ) -> int:
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ) -> int:
        '''simple docstring'''
        return 8
    @property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            'a shark' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 0 | 0 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    # Build a long tensor on the test device from a nested list of token ids.
    return torch.tensor(
        tok_lst ,dtype=torch.long ,device=torch_device ,)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
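# A minimal standalone sketch of the ratio-based comparison used above (the
# names `expected` and `actual` are illustrative, not from the test): dividing
# expected by actual keeps the check scale-invariant across values spanning
# many orders of magnitude, unlike `torch.allclose` with one absolute `atol`.
#
#     ratio = expected / actual
#     ok = bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))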
| 366 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm :
    def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ) -> list[int]:
        '''simple docstring'''
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        '''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ) -> None:
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
                print('_' * 50 + '\n' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('The process is in a safe state.\n' )
            else:
                print('System in unsafe state. Aborting...\n' )
                break
    def __pretty_data( self ):
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table' )
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item ) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item )
                + '\n' )
        print(' ' * 9 + 'System Resource Table' )
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item ) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item )
                + '\n' )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
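# A minimal usage sketch (not part of the original module): run the banker's
# safety check on the module-level test data above with verbose table output.
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)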
| 0 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )
    def apply_watermark( self , images : torch.FloatTensor ) -> torch.FloatTensor:
        '''simple docstring'''
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
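# A minimal usage sketch (assumed values, not from the original file): the
# watermarker expects decoded images in [-1, 1] with shape (batch, channels,
# height, width), and is a no-op for sides smaller than 256 pixels.
#
#     watermarker = StableDiffusionXLWatermarker()
#     images = torch.randn(2, 3, 512, 512).clamp(-1.0, 1.0)
#     watermarked = watermarker.apply_watermark(images)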
| 367 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/electra-small-generator': 512,
    'google/electra-base-generator': 512,
    'google/electra-large-generator': 512,
    'google/electra-small-discriminator': 512,
    'google/electra-base-discriminator': 512,
    'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    'google/electra-small-generator': {'do_lower_case': True},
    'google/electra-base-generator': {'do_lower_case': True},
    'google/electra-large-generator': {'do_lower_case': True},
    'google/electra-small-discriminator': {'do_lower_case': True},
    'google/electra-base-discriminator': {'do_lower_case': True},
    'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
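# Illustrative only (hypothetical token ids, not from the original file):
# `build_inputs_with_special_tokens` wraps one sequence as [CLS] A [SEP] and a
# pair as [CLS] A [SEP] B [SEP]; `create_token_type_ids_from_sequences` marks
# the second segment with 1s.
#
#     tokenizer = ElectraTokenizerFast.from_pretrained('google/electra-small-discriminator')
#     tokenizer.build_inputs_with_special_tokens([7, 8])           # [cls_id, 7, 8, sep_id]
#     tokenizer.create_token_type_ids_from_sequences([7, 8], [9])  # [0, 0, 0, 0, 1, 1]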
| 0 | 0 |
'''simple docstring'''
# Functions to print the upper and lower halves of a diamond (pyramid)
def floyd( n : int ) -> None:
    for i in range(0 ,n ):
        for _ in range(0 ,n - i - 1 ): # printing spaces
            print(' ' ,end='' )
        for _ in range(0 ,i + 1 ): # printing stars
            print('* ' ,end='' )
        print()
def reverse_floyd( n : int ) -> None:
    for i in range(n ,0 ,-1 ):
        for _ in range(i ,0 ,-1 ): # printing stars
            print('* ' ,end='' )
        print()
        for _ in range(n - i + 1 ,0 ,-1 ): # printing spaces
            print(' ' ,end='' )
def pretty_print( n : int ) -> None:
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
    print(R'''| /\ | |- | |- |--| |\ /| |-''')
    print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
    print('''Good Bye...''')
| 368 |
'''simple docstring'''
def perfect_cube( n : int ) -> bool:
    # Round the floating-point cube root before cubing: in IEEE-754 arithmetic
    # 27 ** (1 / 3) evaluates to 3.0000000000000004, so a naive equality check fails.
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
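# A float-free alternative (a sketch, assuming a non-negative integer input):
# binary-search the integer cube root, which stays exact for arbitrarily large
# values where floating-point roots lose precision.
def perfect_cube_binary_search( n : int ) -> bool:
    low , high = 0 , n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False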
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 0 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export( model ,model_args : tuple ,output_path : Path ,ordered_input_names ,output_names ,dynamic_axes ,opset ,use_external_data_format=False ,) -> None:
    output_path.parent.mkdir(parents=True ,exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model ,model_args ,f=output_path.as_posix() ,input_names=ordered_input_names ,output_names=output_names ,dynamic_axes=dynamic_axes ,do_constant_folding=True ,use_external_data_format=use_external_data_format ,enable_onnx_checker=True ,opset_version=opset ,)
    else:
        export(
            model ,model_args ,f=output_path.as_posix() ,input_names=ordered_input_names ,output_names=output_names ,dynamic_axes=dynamic_axes ,do_constant_folding=True ,opset_version=opset ,)
@torch.no_grad()
def convert_models( model_path : str ,output_path : str ,opset : int ,fp16 : bool = False ) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder ,model_args=(
            torch.randn(1 ,vae_latent_channels ,25 ,25 ).to(device=device ,dtype=dtype ),
            False,
        ) ,output_path=output_path / 'vae_decoder' / 'model.onnx' ,ordered_input_names=['latent_sample', 'return_dict'] ,output_names=['sample'] ,dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } ,opset=opset ,)
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
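# Example invocation (the script name and paths below are placeholders, not
# taken from the original repository):
#
#     python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14 --fp16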
| 369 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Tuple = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
    def _create_dummy_dataset( self ) -> Dataset:
        '''simple docstring'''
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self ):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
        '''simple docstring'''
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        '''simple docstring'''
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self ):
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
    def test_flat_ip( self ):
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory( self ):
        '''simple docstring'''
        import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom( self ):
        '''simple docstring'''
        import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization( self ):
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 ,dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    index.save(path ,storage_options=mockfs.storage_options )
    index = FaissIndex.load(path ,storage_options=mockfs.storage_options )
    query = np.zeros(5 ,dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
    def test_elasticsearch( self ):
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 0 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig( PretrainedConfig ):
    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse('1.12' )
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-3
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizer" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
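# A minimal usage sketch (assumes a BLOOM tokenizer can be downloaded; names
# here mirror the classes above, the rest is illustrative): build the ONNX
# dummy inputs with the past-key-value branch enabled.
#
#     from transformers import AutoTokenizer, TensorType
#     config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#     onnx_config = BloomOnnxConfig(config, use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)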
| 370 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config( PretrainedConfig ):
    model_type = 't5'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig( OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
| 0 | 0 |
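# --- Illustrative sketch (not part of the dataset rows) ---
# The T5 config above derives the dense activation from `feed_forward_proj`
# by splitting on '-'; note the validation relies on `and` binding tighter
# than `or`. A standalone re-implementation of that parsing:
def parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(f"{feed_forward_proj!r} is not a valid activation spec")
    if feed_forward_proj == "gated-gelu":   # backwards compatibility
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)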
'''simple docstring'''
def cocktail_shaker_sort( unsorted : list ) -> list:
    for i in range(len(unsorted ) - 1 ,0 ,-1 ):
        swapped = False
        for j in range(i ,0 ,-1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 371 |
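# --- Illustrative usage (not part of the dataset rows) ---
# Property check of the cocktail shaker sort above against the built-in sort:
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert cocktail_shaker_sort(list(data)) == sorted(data)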
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = {}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''llama'''
A__ = ['''past_key_values''']
def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = vocab_size
__snake_case : List[str] = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[Any] = num_key_value_heads
__snake_case : int = hidden_act
__snake_case : Any = initializer_range
__snake_case : Any = rms_norm_eps
__snake_case : Union[str, Any] = pretraining_tp
__snake_case : Optional[int] = use_cache
__snake_case : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'''got {self.rope_scaling}''' )
__snake_case : Optional[Any] = self.rope_scaling.get('type' , __a )
__snake_case : Tuple = self.rope_scaling.get('factor' , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 0 | 0 |
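# --- Illustrative sketch (not part of the dataset rows) ---
# The `_rope_scaling_validation` logic above, extracted as a standalone check:
# `rope_scaling` is either None or a two-field dict with `type` in
# {'linear', 'dynamic'} and a float `factor` > 1.
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}"
        )
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_type}")
    if factor is None or not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling(None)                                # scaling disabled
validate_rope_scaling({"type": "linear", "factor": 2.0})   # valid config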
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
A__ : Optional[int] = logging.get_logger(__name__)
class snake_case__ ( __UpperCamelCase ):
"""simple docstring"""
def A_ ( self : str , __a : int ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(__a , __a ):
__snake_case : Optional[int] = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self : int , __a : Any , __a : List[str] , __a : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if len(__a ) == 0 or len(__a ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(__a ) )
if isinstance(__a , __a ):
__snake_case : int = [sequences]
__snake_case : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCamelCase )
class snake_case__ ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Any=ZeroShotClassificationArgumentHandler() , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = args_parser
super().__init__(*__a , **__a )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
for label, ind in self.model.config.label2id.items():
if label.lower().startswith('entail' ):
return ind
return -1
def A_ ( self : List[str] , __a : str , __a : Tuple=True , __a : Tuple=True , __a : List[Any]=TruncationStrategy.ONLY_FIRST , **__a : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
    'Tokenizer does not support padding, which is necessary for zero-shot classification; '
    'falling back to `pad_token=eos_token`.' )
__snake_case : Optional[int] = self.tokenizer.eos_token
try:
__snake_case : Optional[int] = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=__a , )
except Exception as e:
if "too short" in str(__a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__snake_case : Union[str, Any] = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def A_ ( self : Optional[Any] , **__a : Any ) -> Dict:
'''simple docstring'''
if kwargs.get('multi_class' , __a ) is not None:
__snake_case : Optional[Any] = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
__snake_case : Dict = {}
if "candidate_labels" in kwargs:
__snake_case : Optional[int] = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
__snake_case : List[str] = kwargs['hypothesis_template']
__snake_case : List[Any] = {}
if "multi_label" in kwargs:
__snake_case : Union[str, Any] = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union[str, List[str]] , *__a : Any , **__a : List[Any] , ) -> List[str]:
'''simple docstring'''
if len(__a ) == 0:
pass
elif len(__a ) == 1 and "candidate_labels" not in kwargs:
__snake_case : int = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__a , **__a )
def A_ ( self : Optional[int] , __a : Optional[int] , __a : List[Any]=None , __a : Optional[int]="This example is {}." ) -> Optional[int]:
'''simple docstring'''
__snake_case : Any = self._args_parser(__a , __a , __a )
for i, (candidate_label, sequence_pair) in enumerate(zip(__a , __a ) ):
__snake_case : Dict = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__a ) - 1,
**model_input,
}
def A_ ( self : Dict , __a : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : List[Any] = inputs['candidate_label']
__snake_case : Dict = inputs['sequence']
__snake_case : int = {k: inputs[k] for k in self.tokenizer.model_input_names}
__snake_case : Optional[Any] = self.model(**__a )
__snake_case : Any = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def A_ ( self : Tuple , __a : Dict , __a : str=False ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = [outputs['candidate_label'] for outputs in model_outputs]
__snake_case : List[str] = [outputs['sequence'] for outputs in model_outputs]
__snake_case : Tuple = np.concatenate([output['logits'].numpy() for output in model_outputs] )
__snake_case : Dict = logits.shape[0]
__snake_case : Dict = len(__a )
__snake_case : Union[str, Any] = N // n
__snake_case : Any = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__snake_case : str = self.entailment_id
__snake_case : List[str] = -1 if entailment_id == 0 else 0
__snake_case : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
__snake_case : Optional[Any] = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
__snake_case : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__snake_case : int = reshaped_outputs[..., self.entailment_id]
__snake_case : Tuple = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
__snake_case : List[str] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 350 |
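# --- Illustrative sketch (not part of the dataset rows) ---
# The postprocess step above scores labels from NLI logits in two modes.
# Hypothetical logits for 1 sequence, 2 candidate labels, 3 NLI classes
# ordered (contradiction, neutral, entailment):
import numpy as np

logits = np.array([[[2.0, 0.5, 1.0],
                    [0.1, 0.2, 3.0]]])          # shape (1, num_labels, 3)
entailment_id, contradiction_id = 2, 0

# multi_label: softmax over (contradiction, entailment) per label, independently
pair = logits[..., [contradiction_id, entailment_id]]
multi = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
multi_scores = multi[..., 1]                    # P(entailment) per label

# single-label: softmax of the entailment logits across all candidate labels
entail = logits[..., entailment_id]
single_scores = np.exp(entail) / np.exp(entail).sum(-1, keepdims=True)
assert np.isclose(single_scores.sum(), 1.0)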
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''[email protected]'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , __a : str ) -> None:
'''simple docstring'''
super().__init__()
__snake_case : list[str] = []
__snake_case : Dict = domain
def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just '#', record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case : Optional[Any] = parse.urljoin(self.domain , __a )
self.urls.append(__a )
def a_ ( _UpperCAmelCase : str ) -> str:
return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] )
def a_ ( _UpperCAmelCase : str ) -> str:
return parse.urlparse(_UpperCAmelCase ).netloc
def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
__snake_case : List[Any] = get_domain_name(_UpperCAmelCase )
# Initialize the parser
__snake_case : Tuple = Parser(_UpperCAmelCase )
try:
# Open URL
__snake_case : Any = requests.get(_UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case : Dict = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case : List[Any] = requests.get(_UpperCAmelCase )
# Get the valid email.
__snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_UpperCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_UpperCAmelCase )
if __name__ == "__main__":
A__ : Tuple = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 0 | 0 |
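# --- Illustrative sketch (not part of the dataset rows) ---
# The crawler above extracts addresses with a domain-anchored regex. Note the
# original pattern does not escape dots in the domain, so '.' matches any
# character; the re.escape call here is a hardening assumption on my part.
import re

domain = "github.com"
page_text = "Contact [email protected] or [email protected] for help."
emails = re.findall("[a-zA-Z0-9]+@" + re.escape(domain), page_text)
assert emails == ["[email protected]", "[email protected]"]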
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A__ : Union[str, Any] = 1_6
A__ : List[Any] = 3_2
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any] = 16 ) -> Optional[Any]:
__snake_case : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' )
__snake_case : Tuple = load_dataset('glue' ,'mrpc' )
def tokenize_function(_UpperCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
__snake_case : List[Any] = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=_UpperCAmelCase ,max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__snake_case : Tuple = datasets.map(
_UpperCAmelCase ,batched=_UpperCAmelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case : str = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(_UpperCAmelCase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__snake_case : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__snake_case : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
__snake_case : Tuple = 8
else:
__snake_case : Optional[int] = None
return tokenizer.pad(
_UpperCAmelCase ,padding='longest' ,max_length=_UpperCAmelCase ,pad_to_multiple_of=_UpperCAmelCase ,return_tensors='pt' ,)
# Instantiate dataloaders.
__snake_case : List[Any] = DataLoader(
tokenized_datasets['train'] ,shuffle=_UpperCAmelCase ,collate_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase )
__snake_case : Optional[int] = DataLoader(
tokenized_datasets['validation'] ,shuffle=_UpperCAmelCase ,collate_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ) -> Optional[int]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' ,_UpperCAmelCase ) == "1":
__snake_case : Dict = 2
# Initialize accelerator
__snake_case : Optional[int] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case : Optional[int] = config['lr']
__snake_case : Dict = int(config['num_epochs'] )
__snake_case : int = int(config['seed'] )
__snake_case : Any = int(config['batch_size'] )
__snake_case : Optional[Any] = evaluate.load('glue' ,'mrpc' )
# If the batch size is too big we use gradient accumulation
__snake_case : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__snake_case : Dict = batch_size // MAX_GPU_BATCH_SIZE
__snake_case : int = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase )
__snake_case , __snake_case : List[str] = get_dataloaders(_UpperCAmelCase ,_UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' ,return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__snake_case : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
__snake_case : str = AdamW(params=model.parameters() ,lr=_UpperCAmelCase )
# Instantiate scheduler
__snake_case : Tuple = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = accelerator.prepare(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__snake_case : List[str] = model(**_UpperCAmelCase )
__snake_case : Dict = outputs.loss
__snake_case : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__snake_case : List[Any] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__snake_case : Any = model(**_UpperCAmelCase )
__snake_case : Tuple = outputs.logits.argmax(dim=-1 )
__snake_case , __snake_case : Union[str, Any] = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_UpperCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__snake_case : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__snake_case : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_UpperCAmelCase ,references=_UpperCAmelCase ,)
__snake_case : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,_UpperCAmelCase )
def a_ ( ) -> Optional[Any]:
__snake_case : int = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' ,type=_UpperCAmelCase ,default=_UpperCAmelCase ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' ,)
parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
__snake_case : Union[str, Any] = parser.parse_args()
__snake_case : str = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase ,_UpperCAmelCase )
if __name__ == "__main__":
main()
| 351 |
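# --- Illustrative sketch (not part of the dataset rows) ---
# The training function above caps the per-device batch size at
# MAX_GPU_BATCH_SIZE (16 in the script) and recovers the requested effective
# batch size through gradient accumulation:
MAX_GPU_BATCH_SIZE = 16

def split_batch(requested_batch_size: int):
    gradient_accumulation_steps = 1
    if requested_batch_size > MAX_GPU_BATCH_SIZE:
        gradient_accumulation_steps = requested_batch_size // MAX_GPU_BATCH_SIZE
        requested_batch_size = MAX_GPU_BATCH_SIZE
    return requested_batch_size, gradient_accumulation_steps

assert split_batch(64) == (16, 4)   # 16 per step * 4 accumulation steps = 64
assert split_batch(16) == (16, 1)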
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A__ : Dict = logging.getLogger()
def a_ ( ) -> Tuple:
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('-f' )
__snake_case : Any = parser.parse_args()
return args.f
def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
__snake_case : Tuple = {}
__snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase ,'r' ) as f:
__snake_case : List[str] = json.load(_UpperCAmelCase )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
def a_ ( ) -> Union[str, Any]:
__snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
A__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
@classmethod
def A_ ( cls : Any ) -> List[str]:
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : List[Any] = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__snake_case : Any = 7 if get_gpu_count() > 1 else 2
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : Tuple = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Any = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(__a )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) )
@slow
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
__snake_case : List[str] = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[int] = get_results(__a )
# The untrained base model scores about 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
| 0 | 0 |
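# --- Illustrative sketch (not part of the dataset rows) ---
# The tests above build argv by splitting a triple-quoted command block on
# whitespace and prepending the `accelerate launch` prefix; this only works
# because no argument value contains spaces.
launch_args = ['accelerate', 'launch', '--config_file', 'default_config.yml']
testargs = '''
    run_glue_no_trainer.py
    --model_name_or_path distilbert-base-uncased
    --per_device_train_batch_size=2
'''.split()
cmd = launch_args + testargs
assert cmd[4] == 'run_glue_no_trainer.py'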
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case__ :
A__ = 42
A__ = None
A__ = None
def a_ ( ) -> Union[str, Any]:
__snake_case : Optional[int] = Node(1 )
__snake_case : Tuple = Node(2 )
__snake_case : Dict = Node(3 )
__snake_case : Any = Node(4 )
__snake_case : List[str] = Node(5 )
return tree
def a_ ( _UpperCAmelCase : Union[str, Any] ) -> List[str]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a_ ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a_ ( _UpperCAmelCase : str ) -> Dict:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a_ ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def a_ ( _UpperCAmelCase : Any ) -> Optional[Any]:
__snake_case : List[str] = []
if root is None:
return output
__snake_case : List[Any] = deque([root] )
while process_queue:
__snake_case : Optional[int] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
__snake_case : int = []
def populate_output(_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(lowerCamelCase__ ,lowerCamelCase__ )
return output
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Tuple ) -> int:
__snake_case : int = []
def populate_output(_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(lowerCamelCase__ ,lowerCamelCase__ )
return output
def a_ ( _UpperCAmelCase : str ) -> Tuple:
if root is None:
return []
__snake_case : Tuple = []
__snake_case : Tuple = 0
__snake_case : Union[str, Any] = height(lowerCamelCase__ )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase__ ,lowerCamelCase__ ) )
__snake_case : Optional[int] = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase__ ,lowerCamelCase__ ) )
__snake_case : Dict = 0
return output
def a_ ( ) -> Optional[int]: # Main function for testing.
__snake_case : Dict = make_tree()
print(f'''In-order Traversal: {inorder(lowerCamelCase__ )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCamelCase__ )}''' )
print(f'''Post-order Traversal: {postorder(lowerCamelCase__ )}''' ,'\n' )
print(f'''Height of Tree: {height(lowerCamelCase__ )}''' ,'\n' )
print('Complete Level Order Traversal: ' )
print(level_order(lowerCamelCase__ ) ,'\n' )
print('Level-wise order Traversal: ' )
for level in range(1 ,height(lowerCamelCase__ ) + 1 ):
print(f'''Level {level}:''' ,get_nodes_from_left_to_right(lowerCamelCase__ ,level=lowerCamelCase__ ) )
print('\nZigZag order Traversal: ' )
print(zigzag(lowerCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 352 |
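# --- Illustrative sketch (not part of the dataset rows) ---
# An equivalent zigzag traversal that reuses the level-order queue directly
# instead of recomputing each level with `populate_output`; assumes the same
# Node(data, left, right) shape as the row above.
from collections import deque

def zigzag_levels(root):
    if root is None:
        return []
    output, queue, left_to_right = [], deque([root]), True
    while queue:
        level = [node.data for node in queue]
        output.append(level if left_to_right else level[::-1])
        left_to_right = not left_to_right
        for _ in range(len(level)):
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
    return output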
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
__snake_case : Optional[Any] = [True] * n
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__snake_case : Optional[int] = i * 2
while index < n:
__snake_case : Union[str, Any] = False
__snake_case : int = index + i
__snake_case : Dict = [2]
for i in range(3 ,_UpperCAmelCase ,2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
__snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case : Tuple = prime_sieve(_UpperCAmelCase )
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : Optional[int] = primes[prime_index + 1]
__snake_case : Union[str, Any] = last_prime**2
__snake_case : Dict = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Dict = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 0 |
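# --- Illustrative sketch (not part of the dataset rows) ---
# A compact reference sieve for cross-checking the odd-only variant above
# (which special-cases 2 and steps through odd candidates only):
import math

def reference_prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, math.isqrt(n - 1) + 1):
        if is_prime[i]:
            for multiple in range(i * i, n, i):
                is_prime[multiple] = False
    return [p for p in range(2, n) if is_prime[p]]

assert reference_prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]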
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class snake_case__ :
def __init__( self : Optional[int] , __a : int , __a : List[str]=13 , __a : Dict=2 , __a : Any=24 , __a : Dict=16 , __a : Any=True , __a : int=True , __a : str=32 , __a : List[Any]=5 , __a : Optional[Any]=4 , __a : List[str]=37 , __a : List[Any]="gelu" , __a : Any=0.1 , __a : Optional[Any]=0.1 , __a : str=10 , __a : int=0.0_2 , __a : List[str]=None , __a : List[str]=2 , __a : Optional[Any]=2 , ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = parent
__snake_case : str = batch_size
__snake_case : Union[str, Any] = patch_size
__snake_case : List[Any] = max_length
__snake_case : List[Any] = num_mel_bins
__snake_case : List[Any] = is_training
__snake_case : Dict = use_labels
__snake_case : List[Any] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Tuple = type_sequence_label_size
__snake_case : Any = initializer_range
__snake_case : Dict = scope
__snake_case : Any = frequency_stride
__snake_case : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__snake_case : Union[str, Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__snake_case : List[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
__snake_case : str = frequency_out_dimension * time_out_dimension
__snake_case : Any = num_patches + 2
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = self.get_config()
return config, input_values, labels
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def A_ ( self : int , __a : List[str] , __a : Optional[Any] , __a : str ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = ASTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : int = config_and_inputs
__snake_case : str = {'input_values': input_values}
return config, inputs_dict
@require_torch
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
A__ = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : Optional[int] , __a : Tuple , __a : Dict , __a : Tuple , __a : Dict , __a : Dict ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = ASTModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
pass
def A_ ( self : Dict ) -> Any:
'''simple docstring'''
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(_lowerCAmelCase )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : int = ['input_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def A_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = ASTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a_ ( ) -> Union[str, Any]:
__snake_case : Union[str, Any] = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' ,filename='sample_audio.flac' ,repo_type='dataset' )
__snake_case , __snake_case : Union[str, Any] = torchaudio.load(_lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class snake_case__ ( unittest.TestCase ):
@cached_property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Dict = self.default_feature_extractor
__snake_case : Any = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(_lowerCAmelCase )
__snake_case : int = self.default_feature_extractor
__snake_case , __snake_case : List[str] = prepare_audio()
__snake_case : List[Any] = audio.squeeze().numpy()
__snake_case : Optional[int] = feature_extractor(_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(**_lowerCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case : List[Any] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 353 |
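# --- Illustrative arithmetic (not part of the dataset rows) ---
# How the tester above derives its expected sequence length: patches per axis
# follow (dim - patch_size) // stride + 1, and 2 is added for the [CLS] and
# distillation tokens. With the tester defaults (patch_size=2, 16 mel bins,
# max_length=24, both strides=2):
patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride, time_stride = 2, 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1   # 8
time_out = (max_length - patch_size) // time_stride + 1               # 12
num_patches = frequency_out * time_out                                # 96
seq_length = num_patches + 2                                          # 98
assert seq_length == 98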
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( lowerCAmelCase__ ):
A__ = (DDPMParallelScheduler,)
def A_ ( self : str , **__a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def A_ ( self : str ) -> List[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A_ ( self : int ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A_ ( self : str ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config()
__snake_case : Optional[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : List[Any] = self.get_scheduler_config()
__snake_case : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Tuple = len(_SCREAMING_SNAKE_CASE )
__snake_case : Dict = self.dummy_model()
__snake_case : Optional[Any] = self.dummy_sample_deter
__snake_case : Any = self.dummy_sample_deter + 0.1
__snake_case : str = self.dummy_sample_deter - 0.1
__snake_case : Any = samplea.shape[0]
__snake_case : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
__snake_case : Any = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
__snake_case : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__snake_case : Optional[int] = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__snake_case : Optional[Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__snake_case : Tuple = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A_ ( self : int ) -> int:
'''simple docstring'''
__snake_case : str = self.scheduler_classes[0]
__snake_case : Dict = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
__snake_case : Union[str, Any] = self.dummy_model()
__snake_case : int = self.dummy_sample_deter
__snake_case : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__snake_case : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__snake_case : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__snake_case : Optional[int] = pred_prev_sample
__snake_case : Union[str, Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__snake_case : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A_ ( self : List[Any] ) -> str:
'''simple docstring'''
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config(prediction_type='v_prediction' )
__snake_case : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Tuple = len(_SCREAMING_SNAKE_CASE )
__snake_case : Tuple = self.dummy_model()
__snake_case : int = self.dummy_sample_deter
__snake_case : List[str] = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__snake_case : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__snake_case : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__snake_case : Optional[Any] = pred_prev_sample
__snake_case : str = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__snake_case : List[str] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A_ ( self : List[Any] ) -> str:
'''simple docstring'''
__snake_case : Any = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Any = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
__snake_case : List[str] = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
__snake_case : Optional[int] = -1
else:
__snake_case : Optional[int] = timesteps[i + 1]
__snake_case : Optional[Any] = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
__snake_case : List[str] = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A_ ( self : Dict ) -> int:
'''simple docstring'''
__snake_case : Any = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config()
__snake_case : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config()
__snake_case : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = [100, 87, 50, 1, 0]
__snake_case : Optional[int] = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__snake_case : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_SCREAMING_SNAKE_CASE , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
| 354 |
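# --- Illustrative sketch (not part of the dataset rows) ---
# The variance values asserted above follow from the DDPM posterior variance
# beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t with the
# test's linear beta schedule (1e-4 to 2e-2 over 1000 steps):
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def posterior_variance(t: int) -> float:
    prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return ((1 - prev) / (1 - alphas_cumprod[t]) * betas[t]).item()

assert abs(posterior_variance(0) - 0.0) < 1e-4
assert abs(posterior_variance(487) - 0.00979) < 1e-4
assert abs(posterior_variance(999) - 0.02) < 1e-4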
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : Union[str, Any] = 'tf'
def A_ ( self : Dict , __a : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : Any , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(__a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
        # TF not in environment -> use PyTorch
        __snake_case : Union[str, Any] = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
        __snake_case : Tuple = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
        __snake_case : Optional[Any] = MagicMock(return_value=True )
        __snake_case : Tuple = MagicMock(return_value=True )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
        __snake_case : str = MagicMock(return_value=False )
        __snake_case : List[Any] = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
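# A simplified, hypothetical re-implementation of the priority order the tests
# above pin down (the real logic lives in FeaturesManager.determine_framework;
# local-checkpoint inspection is omitted from this sketch):
def _determine_framework_sketch(framework=None, pt_available=True, tf_available=True):
    if framework is not None:
        return framework  # an explicit user choice always wins
    if pt_available:
        return 'pt'  # PyTorch is preferred when both frameworks are installed
    if tf_available:
        return 'tf'
    raise EnvironmentError('Neither PyTorch nor TensorFlow found in environment.')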
| 0 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class snake_case__ ( PretrainedConfig ):
A__ = '''longformer'''
def __init__( self : Optional[int] , __a : Union[List[int], int] = 512 , __a : int = 2 , __a : int = 1 , __a : int = 0 , __a : int = 2 , __a : int = 30522 , __a : int = 768 , __a : int = 12 , __a : int = 12 , __a : int = 3072 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 512 , __a : int = 2 , __a : float = 0.0_2 , __a : float = 1e-12 , __a : bool = False , **__a : Any , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__snake_case : Tuple = attention_window
__snake_case : Optional[Any] = sep_token_id
__snake_case : Union[str, Any] = bos_token_id
__snake_case : List[str] = eos_token_id
__snake_case : Union[str, Any] = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Union[str, Any] = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : Dict = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[Any] = onnx_export
class snake_case__ ( OnnxConfig ):
def __init__( self : Any , __a : "PretrainedConfig" , __a : str = "default" , __a : "List[PatchingSpec]" = None ) -> Tuple:
'''simple docstring'''
super().__init__(__a , __a , __a )
__snake_case : Any = True
@property
def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A_ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__snake_case : Optional[Any] = super().outputs
if self.task == "default":
__snake_case : Dict = {0: 'batch'}
return outputs
@property
def A_ ( self : List[Any] ) -> float:
'''simple docstring'''
return 1e-4
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A_ ( self : List[Any] , __a : "PreTrainedTokenizerBase" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case : Tuple = super().generate_dummy_inputs(
preprocessor=__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
__snake_case : Union[str, Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
__snake_case : Union[str, Any] = 1
return inputs
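# Usage sketch of the dummy-input trick above (assumes torch is installed):
# every second token is marked "global" by writing ones into a zeroed mask.
#   import torch
#   input_ids = torch.ones(2, 8, dtype=torch.long)
#   global_attention_mask = torch.zeros_like(input_ids)
#   global_attention_mask[:, ::2] = 1  # token positions 0, 2, 4, ... attend globally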
| 355 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( TokenizerTesterMixin , unittest.TestCase ):
A__ = ProphetNetTokenizer
A__ = False
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : int , __a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = 'UNwant\u00E9d,running'
__snake_case : List[str] = 'unwanted, running'
return input_text, output_text
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class(self.vocab_file )
__snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A_ ( self : int ) -> Any:
'''simple docstring'''
__snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__snake_case : List[Any] = {}
for i, token in enumerate(__a ):
__snake_case : List[str] = i
__snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' )
self.assertIsInstance(__a , __a )
__snake_case : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
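# A compact sketch of the greedy longest-match-first WordPiece loop that the
# WordpieceTokenizer assertions above rely on (simplified: no max-chars guard,
# operates on a single pre-split word, `vocab` is any container supporting `in`):
def _wordpiece_sketch(word, vocab, unk_token='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return [unk_token]  # no sub-piece matched -> the whole word is unknown
        start = end
    return tokens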
| 0 | 0 |
'''simple docstring'''
import argparse
import os
import re
A__ : List[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
A__ : Optional[int] = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
A__ : Dict = re.compile(R'''\s*\(\s*\"(\S[^\"]+)\"''')
def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : bool = False ) -> Optional[Any]:
with open(_UpperCAmelCase ,'r' ,encoding='utf-8' ) as f:
__snake_case : List[Any] = f.read()
__snake_case : Optional[Any] = content.split('\n' )
__snake_case : Tuple = []
__snake_case : Optional[Any] = 0
while line_idx < len(_UpperCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case : int = len(re.search(r'^(\s*)\S' ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case : int = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case : List[Any] = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case : Tuple = sorted(_UpperCAmelCase ,key=lambda _UpperCAmelCase : _re_identifier.search(_UpperCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_UpperCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write('\n'.join(_UpperCAmelCase ) )
elif "\n".join(_UpperCAmelCase ) != content:
return True
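# Toy illustration of the sort key used above: blocks are ordered by the first
# quoted identifier that `_re_identifier` finds in each block, e.g.
#   blocks = ['        ("bert", "BertModel"),', '        ("albert", "AlbertModel"),']
#   sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
# puts the "albert" entry before the "bert" entry.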
def a_ ( _UpperCAmelCase : bool = False ) -> Any:
__snake_case : int = [os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) for f in os.listdir(_UpperCAmelCase ) if f.endswith('.py' )]
__snake_case : str = [sort_auto_mapping(_UpperCAmelCase ,overwrite=_UpperCAmelCase ) for fname in fnames]
if not overwrite and any(_UpperCAmelCase ):
__snake_case : Tuple = [f for f, d in zip(_UpperCAmelCase ,_UpperCAmelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_UpperCAmelCase )}. Run `make style` to fix'''
' this.' )
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
A__ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
'''simple docstring'''
class snake_case__ :
def __init__( self : List[Any] , __a : Tuple , __a : Dict , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = None
__snake_case : Any = None
__snake_case : List[str] = graph
self._normalize_graph(lowercase__ , lowercase__ )
__snake_case : Tuple = len(lowercase__ )
__snake_case : Any = None
def A_ ( self : Optional[Any] , __a : Optional[Any] , __a : List[Any] ) -> Any:
'''simple docstring'''
        if isinstance(sources , int ):
            __snake_case : Union[str, Any] = [sources]
        if isinstance(sinks , int ):
            __snake_case : int = [sinks]
if len(lowercase__ ) == 0 or len(lowercase__ ) == 0:
return
__snake_case : List[Any] = sources[0]
__snake_case : List[Any] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowercase__ ) > 1 or len(lowercase__ ) > 1:
__snake_case : Optional[int] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__snake_case : int = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__snake_case : Dict = max_input_flow
__snake_case : Optional[Any] = 0
__snake_case : int = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__snake_case : Dict = max_input_flow
__snake_case : List[str] = size - 1
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A_ ( self : int , __a : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case : Any = algorithm(self )
class snake_case__ :
def __init__( self : Union[str, Any] , __a : List[str] ) -> int:
'''simple docstring'''
__snake_case : Optional[Any] = flow_network
__snake_case : str = flow_network.verticesCount
__snake_case : Dict = flow_network.sourceIndex
__snake_case : Optional[int] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__snake_case : str = flow_network.graph
__snake_case : Tuple = False
def A_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
__snake_case : Optional[int] = True
def A_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
pass
class snake_case__ ( lowerCamelCase__ ):
def __init__( self : List[Any] , __a : List[str] ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase__ )
# use this to save your result
__snake_case : Optional[Any] = -1
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class snake_case__ ( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __a : List[Any] ) -> Tuple:
'''simple docstring'''
super().__init__(lowercase__ )
__snake_case : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
__snake_case : Union[str, Any] = [0] * self.verticies_count
__snake_case : Dict = [0] * self.verticies_count
def A_ ( self : Dict ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__snake_case : Optional[Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__snake_case : Dict = 0
while i < len(lowercase__ ):
__snake_case : Any = vertices_list[i]
__snake_case : Any = self.heights[vertex_index]
self.process_vertex(lowercase__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowercase__ ) )
__snake_case : Optional[Any] = 0
else:
i += 1
__snake_case : List[Any] = sum(self.preflow[self.source_index] )
def A_ ( self : str , __a : Any ) -> Tuple:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowercase__ , lowercase__ )
self.relabel(lowercase__ )
def A_ ( self : Any , __a : int , __a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A_ ( self : str , __a : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__snake_case : Optional[Any] = self.heights[to_index]
if min_height is not None:
__snake_case : List[str] = min_height + 1
if __name__ == "__main__":
A__ : Optional[Any] = [0]
A__ : int = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__ : List[str] = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 357 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError('The given input must be non-negative' )
# get the generated string sequence
__snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase )
#
# convert them to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case : Optional[Any] = int(sequence[i] ,2 )
return sequence
def a_ ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case : Dict = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case : Dict = gray_code_sequence_string(bit_count - 1 )
__snake_case : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case : str = '0' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case : Any = '1' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
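# Quick sanity check: the first function above maps 2 -> [0, 1, 3, 2]
# (binary 00, 01, 11, 10), where adjacent codes differ in exactly one bit.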
| 0 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
A__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
A__ : Any = [0, 2_5, 5_0]
A__ : Dict = [2_5, 5_0, 7_5]
A__ : Any = fuzz.membership.trimf(X, abca)  # young: triangle over [0, 25, 50]
A__ : Optional[Any] = fuzz.membership.trimf(X, abca)  # middle_aged: triangle over [25, 50, 75]
# Compute the different operations using inbuilt functions.
A__ : Optional[Any] = np.ones(7_5)
A__ : Optional[int] = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
A__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
A__ : List[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
A__ : List[str] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
A__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
A__ : int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
A__ : List[str] = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
A__ : Optional[int] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
A__ : List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
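# Neither composition named above is computed in this script; a minimal numpy
# sketch over two small, hypothetical fuzzy relation matrices R1 (m x n) and
# R2 (n x p) would be:
#   R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
#   R2 = np.array([[0.5, 0.9], [0.3, 0.7]])
#   max_min = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
#   max_product = np.max(R1[:, :, None] * R2[None, :, :], axis=1)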
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 358 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
# fmt: off
__snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : List[str] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
        __snake_case : Optional[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
        __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[str] = image_processor(__a , return_tensors='np' )
__snake_case : List[str] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[int] = 'lower newer'
__snake_case : Dict = processor(text=__a )
__snake_case : List[Any] = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : List[Any] = 'lower newer'
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(__a ):
processor()
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(__a )
__snake_case : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
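# Minimal end-to-end sketch of what this processor wraps (inputs hypothetical):
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text=['a photo of a cat'], images=[pil_image], return_tensors='pt')
#   # -> BatchEncoding with input_ids, token_type_ids, attention_mask and pixel_values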
| 0 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def a_ ( _UpperCAmelCase : int ) -> int:
__snake_case : List[Any] = SwinConfig()
__snake_case : Union[str, Any] = swin_name.split('_' )
__snake_case : List[str] = name_split[1]
__snake_case : Any = int(name_split[4] )
__snake_case : Optional[Any] = int(name_split[3][-1] )
if model_size == "tiny":
__snake_case : List[Any] = 96
__snake_case : Tuple = (2, 2, 6, 2)
__snake_case : str = (3, 6, 12, 24)
elif model_size == "small":
__snake_case : str = 96
__snake_case : Any = (2, 2, 18, 2)
__snake_case : Dict = (3, 6, 12, 24)
elif model_size == "base":
__snake_case : Optional[int] = 1_28
__snake_case : Union[str, Any] = (2, 2, 18, 2)
__snake_case : Dict = (4, 8, 16, 32)
else:
__snake_case : List[Any] = 1_92
__snake_case : Optional[Any] = (2, 2, 18, 2)
__snake_case : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
__snake_case : Optional[int] = 2_18_41
else:
__snake_case : List[str] = 10_00
__snake_case : str = 'huggingface/label-files'
__snake_case : Any = 'imagenet-1k-id2label.json'
__snake_case : Optional[int] = json.load(open(hf_hub_download(a__ ,a__ ,repo_type='dataset' ) ,'r' ) )
__snake_case : str = {int(a__ ): v for k, v in idalabel.items()}
__snake_case : str = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
__snake_case : Tuple = img_size
__snake_case : Any = num_classes
__snake_case : List[Any] = embed_dim
__snake_case : Tuple = depths
__snake_case : Optional[Any] = num_heads
__snake_case : Dict = window_size
return config
def a_ ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
if "patch_embed.proj" in name:
__snake_case : Union[str, Any] = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case : Any = name.replace('patch_embed.norm' ,'embeddings.norm' )
if "layers" in name:
__snake_case : Any = 'encoder.' + name
if "attn.proj" in name:
__snake_case : List[Any] = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
__snake_case : str = name.replace('attn' ,'attention.self' )
if "norm1" in name:
__snake_case : Optional[Any] = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
__snake_case : str = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
__snake_case : Optional[int] = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case : int = name.replace('mlp.fc2' ,'output.dense' )
if name == "norm.weight":
__snake_case : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
__snake_case : List[str] = 'layernorm.bias'
if "head" in name:
__snake_case : Optional[Any] = name.replace('head' ,'classifier' )
else:
__snake_case : str = 'swin.' + name
return name
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
__snake_case : List[Any] = orig_state_dict.pop(a__ )
if "mask" in key:
continue
elif "qkv" in key:
__snake_case : List[Any] = key.split('.' )
__snake_case : Optional[int] = int(key_split[1] )
__snake_case : Any = int(key_split[3] )
__snake_case : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__snake_case : str = val[:dim, :]
__snake_case : Tuple = val[
dim : dim * 2, :
]
__snake_case : List[str] = val[-dim:, :]
else:
__snake_case : Dict = val[
:dim
]
__snake_case : str = val[
dim : dim * 2
]
__snake_case : Union[str, Any] = val[
-dim:
]
else:
__snake_case : Dict = val
return orig_state_dict
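# Note on the qkv branch above: timm stores the attention projections fused as a
# single (3 * dim, dim) weight (and 3 * dim bias); rows [0, dim) become the query,
# [dim, 2 * dim) the key and [2 * dim, 3 * dim) the value of the HF checkpoint.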
def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ) -> Tuple:
__snake_case : Any = timm.create_model(a__ ,pretrained=a__ )
timm_model.eval()
__snake_case : Tuple = get_swin_config(a__ )
__snake_case : Dict = SwinForImageClassification(a__ )
model.eval()
__snake_case : List[Any] = convert_state_dict(timm_model.state_dict() ,a__ )
model.load_state_dict(a__ )
__snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : int = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' ,'-' ) ) )
__snake_case : Tuple = Image.open(requests.get(a__ ,stream=a__ ).raw )
__snake_case : Dict = image_processor(images=a__ ,return_tensors='pt' )
__snake_case : Any = timm_model(inputs['pixel_values'] )
__snake_case : Dict = model(**a__ ).logits
assert torch.allclose(a__ ,a__ ,atol=1E-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 359 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple:
__snake_case : str = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]:
__snake_case : Tuple = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict:
__snake_case : Union[str, Any] = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def a_ ( ) -> Optional[Any]:
__snake_case : Any = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple:
__snake_case : List[str] = 'imagenet-1k-id2label.json'
__snake_case : Dict = 10_00
__snake_case : Union[str, Any] = 'huggingface/label-files'
__snake_case : str = num_labels
__snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) )
__snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[Any] = idalabel
__snake_case : str = {v: k for k, v in idalabel.items()}
    __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,id2label=_UpperCAmelCase ,label2id=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13":
__snake_case : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21":
__snake_case : str = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__snake_case : Dict = [2, 2, 20]
__snake_case : Any = [3, 12, 16]
__snake_case : Tuple = [1_92, 7_68, 10_24]
__snake_case : str = CvtForImageClassification(_UpperCAmelCase )
__snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
__snake_case : int = image_size
__snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) )
__snake_case : List[Any] = OrderedDict()
__snake_case : Union[str, Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase )
__snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase )
__snake_case : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__snake_case : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int]="attention" ) -> Union[str, Any]:
__snake_case : int = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
__snake_case : List[Any] = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
__snake_case : Tuple = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
__snake_case : List[Any] = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Tuple=False ) -> Any:
if split_mlp_wi:
__snake_case : Dict = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
__snake_case : str = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
__snake_case : Union[str, Any] = (wi_a, wi_a)
else:
__snake_case : Tuple = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
__snake_case : Tuple = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ) -> str:
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def a_ ( _UpperCAmelCase : dict ,*, _UpperCAmelCase : int ,_UpperCAmelCase : bool ) -> List[str]:
__snake_case : str = traverse_util.flatten_dict(variables['target'] )
__snake_case : str = {'/'.join(_UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__snake_case : int = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' ,_UpperCAmelCase )
__snake_case : Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
__snake_case : str = old['token_embedder/embedding']
# Encoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : List[Any] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'pre_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : Dict = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'attention' )
__snake_case : Optional[Any] = layer_norm
__snake_case : str = k.T
__snake_case : Union[str, Any] = o.T
__snake_case : Tuple = q.T
__snake_case : List[str] = v.T
# Block i, layer 1 (MLP).
__snake_case : Union[str, Any] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'pre_mlp_layer_norm' )
__snake_case , __snake_case : Union[str, Any] = tax_mlp_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,_UpperCAmelCase )
__snake_case : str = layer_norm
if split_mlp_wi:
__snake_case : Tuple = wi[0].T
__snake_case : str = wi[1].T
else:
__snake_case : Tuple = wi.T
__snake_case : Union[str, Any] = wo.T
__snake_case : Any = old[
'encoder/relpos_bias/rel_embedding'
].T
__snake_case : List[str] = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : Any = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_self_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'self_attention' )
__snake_case : List[Any] = layer_norm
__snake_case : Dict = k.T
__snake_case : int = o.T
__snake_case : Any = q.T
__snake_case : Tuple = v.T
# Block i, layer 1 (Cross Attention).
__snake_case : List[Any] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_cross_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'encoder_decoder_attention' )
__snake_case : Union[str, Any] = layer_norm
__snake_case : Tuple = k.T
__snake_case : int = o.T
__snake_case : Optional[int] = q.T
__snake_case : int = v.T
# Block i, layer 2 (MLP).
__snake_case : List[str] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_mlp_layer_norm' )
__snake_case , __snake_case : Any = tax_mlp_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,_UpperCAmelCase )
__snake_case : Optional[Any] = layer_norm
if split_mlp_wi:
__snake_case : Optional[int] = wi[0].T
__snake_case : List[str] = wi[1].T
else:
__snake_case : Tuple = wi.T
__snake_case : Dict = wo.T
__snake_case : List[str] = old['decoder/decoder_norm/scale']
__snake_case : int = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__snake_case : str = old['decoder/logits_dense/kernel'].T
return new
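# Note on all the `.T` transposes above: Flax/T5X stores dense kernels as
# (in_features, out_features), while torch.nn.Linear expects its weight as
# (out_features, in_features), so every kernel is transposed during conversion.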
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : bool ) -> List[str]:
__snake_case : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__snake_case : Tuple = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__snake_case : Dict = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__snake_case : Tuple = state_dict['shared.weight']
return state_dict
def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : str ) -> Tuple:
__snake_case : Optional[int] = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__snake_case : List[Any] = convert_tax_to_pytorch(_UpperCAmelCase ,num_layers=config.num_layers ,is_encoder_only=_UpperCAmelCase )
__snake_case : Union[str, Any] = make_state_dict(_UpperCAmelCase ,_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase ,strict=_UpperCAmelCase )
def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : bool = False ) -> Optional[Any]:
__snake_case : Tuple = TaConfig.from_json_file(_UpperCAmelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__snake_case : int = TaEncoderModel(_UpperCAmelCase )
else:
__snake_case : Union[str, Any] = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCAmelCase )
print('Done' )
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
A__ : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 360 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at ``grid[row][column]`` without
    clashing in its row, its column, or its 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
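# Illustrative check (hedged, using the ``initial_grid`` defined above):
# is_safe(initial_grid, 0, 1, 9) is True -- no 9 in row 0, column 1, or the
# top-left 3x3 box -- while is_safe(initial_grid, 0, 1, 5) is False because
# row 0 already contains a 5.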
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku([list(row) for row in example_grid])  # solve a copy, keeping the original intact
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
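# Illustrative environment (hedged): with the variables below set by the
# SageMaker launcher -- and the `smdistributed` package installed -- the
# check above returns True:
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'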
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'}, )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.', FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`' )
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 361 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : Any ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__snake_case : List[Any] = PriorTransformer(**__a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a )
return model
@property
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : List[str] = self.dummy_image_encoder
__snake_case : str = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : List[str] = self.dummy_image_processor
__snake_case : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , )
__snake_case : str = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case : str = 'cpu'
__snake_case : List[str] = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : List[str] = output.image_embeds
__snake_case : str = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : Union[str, Any] = image[0, -10:]
__snake_case : Any = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__snake_case : List[Any] = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = torch_device == 'cpu'
__snake_case : Dict = True
__snake_case : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , )
@skip_mps
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = torch_device == 'cpu'
__snake_case : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__a , test_mean_pixel_difference=__a , )
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto 2D with a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        error_message = f'''Input values must either be float or int: {list(locals().values())}'''
        raise TypeError(error_message)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
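# Worked example (hedged arithmetic, easy to verify by hand):
# convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
#   -> x' = (1 * 10) / (3 + 10) * 10 = 100 / 13 ~= 7.6923
#   -> y' = (2 * 10) / (3 + 10) * 10 = 200 / 13 ~= 15.3846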
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point about the given axis ('x', 'y' or 'z')."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        error_message = (
            'Input values except axis must either be float or int: '
            f'''{list(input_variables.values())}'''
        )
        raise TypeError(error_message)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'')
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 362 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
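# Illustrative values (hedged, verifiable by hand):
#   digit_factorial_sum(145) == 1! + 4! + 5! == 145        (a fixed point)
#   digit_factorial_sum(169) == 1! + 6! + 9! == 363601,
#   and 363601 -> 1454 -> 169 again: a loop of length three.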
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class snake_case__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def A_ ( self : Optional[Any] , __a : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , _SCREAMING_SNAKE_CASE )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = AutoConfig.from_pretrained('gpt2' )
__snake_case : Optional[int] = GenerationConfig.from_model_config(_SCREAMING_SNAKE_CASE )
__snake_case : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = GenerationConfig()
__snake_case : Union[str, Any] = {
"max_new_tokens": 1024,
"foo": "bar",
}
__snake_case : Any = copy.deepcopy(_SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = generation_config.update(**_SCREAMING_SNAKE_CASE )
# update_kwargs was not modified (no side effects)
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_SCREAMING_SNAKE_CASE , {'foo': 'bar'} )
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = GenerationConfig()
__snake_case : str = "bar"
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(_SCREAMING_SNAKE_CASE )
__snake_case : str = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
__snake_case : int = GenerationConfig.from_model_config(_SCREAMING_SNAKE_CASE )
assert not hasattr(_SCREAMING_SNAKE_CASE , 'foo' ) # no new kwargs should be initialized if from config
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _SCREAMING_SNAKE_CASE )
self.assertEqual(default_config.num_beams , 1 )
__snake_case : List[str] = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _SCREAMING_SNAKE_CASE )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class snake_case__ ( unittest.TestCase ):
@classmethod
def A_ ( cls : Any ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def A_ ( cls : str ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
__snake_case : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='test-generation-config' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__snake_case : Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
__snake_case : Optional[int] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='valid_org/test-generation-config-org' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__snake_case : int = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
| 363 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first ``n`` naturals, via the closed forms n(n+1)(2n+1)/6 and
    (n(n+1)/2)**2, computed with exact integer arithmetic."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
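# Worked example (hedged, hand-checkable): for n = 10 the sum of squares is
# 385 and the square of the sum is 55**2 = 3025, so solution(10) == 2640.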
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_0_2_4,
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class snake_case__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[Any] , __a : List[Any]=None , __a : Optional[int]=None , __a : Tuple="<s>" , __a : Any="</s>" , __a : str="</s>" , __a : List[Any]="<s>" , __a : Optional[Any]="<unk>" , __a : List[str]="<pad>" , __a : Any="<mask>" , __a : List[str]=None , __a : List[Any]=None , __a : List[Any]=None , __a : List[Any]=False , **__a : Tuple , ) -> Tuple:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
__snake_case : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
__snake_case : str = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , legacy_behaviour=lowerCamelCase_ , **lowerCamelCase_ , )
__snake_case : Optional[int] = vocab_file
__snake_case : Dict = False if not self.vocab_file else True
__snake_case : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__snake_case : str = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__snake_case : Dict = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A_ ( self : List[str] , __a : str ) -> None:
'''simple docstring'''
__snake_case : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : List[str] , __a : List[str] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : int ) -> Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__snake_case : int = src_lang
__snake_case : List[str] = self(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
__snake_case : Tuple = self.convert_tokens_to_ids(lowerCamelCase_ )
__snake_case : int = tgt_lang_id
return inputs
def A_ ( self : int , __a : List[str] , __a : str = "eng_Latn" , __a : Optional[List[str]] = None , __a : str = "fra_Latn" , **__a : Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
__snake_case : Optional[Any] = src_lang
__snake_case : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : str ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : Tuple , __a : int ) -> None:
'''simple docstring'''
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(lowerCamelCase_ )
if self.legacy_behaviour:
__snake_case : str = []
__snake_case : int = [self.eos_token_id, self.cur_lang_code]
else:
__snake_case : Union[str, Any] = [self.cur_lang_code]
__snake_case : int = [self.eos_token_id]
__snake_case : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Any , __a : str ) -> None:
'''simple docstring'''
__snake_case : Any = self.convert_tokens_to_ids(lowerCamelCase_ )
if self.legacy_behaviour:
__snake_case : Any = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
else:
__snake_case : Dict = [self.cur_lang_code]
__snake_case : List[str] = [self.eos_token_id]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__snake_case : Optional[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
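# Illustrative usage (hedged: this mirrors the upstream NllbTokenizerFast API;
# the checkpoint is fetched from the Hub and is not bundled here):
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", return_tensors="pt")  # ids include the language-code special tokens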
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
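# Illustrative effect (hedged): with this layout, `from transformers import
# GroupViTModel` resolves through _LazyModule, so torch/TensorFlow are only
# imported the first time a modeling class is actually accessed.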
| 0 | 0 |
'''simple docstring'''
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:  # an empty or single-character string is a palindrome
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f'''all({name}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 365 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return 8
@property
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Optional[Any] = PriorTransformer(**__a )
return model
@property
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Optional[int] = ShapERenderer(**__a )
return model
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : Union[str, Any] = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : Optional[Any] = self.dummy_renderer
__snake_case : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__snake_case : int = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : Optional[int] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**__a )
__snake_case : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Dict = output.images[0]
__snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : str = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : int ) -> Tuple:
'''simple docstring'''
__snake_case : int = torch_device == 'cpu'
__snake_case : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : str = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Dict = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : int = 1
__snake_case : Tuple = 2
__snake_case : Tuple = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe(
'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict[str, list[int]] = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
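# Illustrative run (hedged, follows directly from the search logic above):
#   Automaton(["he", "she", "his", "hers"]).search_in("ushers")
#   -> {'she': [1], 'he': [2], 'hers': [2]}   (match start indices in "ushers")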
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the safety check, printing a safe execution order if one exists."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'''Process {process_number + 1} is executing.''')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
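# Illustrative run (hedged: mirrors the upstream demo for this class):
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)
# prints the allocation tables and then a safe execution order, if one exists.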
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
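# Note (hedged design remark): this scan assumes ``nums`` is sorted in
# ascending order; for an unsorted array the classic hash-map Two Sum gives
# O(n) time at the cost of O(n) extra space.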
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 1_1, 1_5], 9) = }""")
| 367 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class snake_case__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
__snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __a ) != do_lower_case
or normalizer_state.get('strip_accents' , __a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars
):
__snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) )
__snake_case : str = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : Any = tokenize_chinese_chars
__snake_case : Union[str, Any] = normalizer_class(**__a )
__snake_case : Any = do_lower_case
def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
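# Illustrative layout produced by the two helpers above (the ids are placeholders,
# not real vocabulary entries):
#   pair input:     [CLS] a1 a2 [SEP] b1 b2 [SEP]
#   token_type_ids:   0   0  0   0    1  1   1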
| 0 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein : Dict ) -> Dict[str, torch.Tensor]:
restype_atom14_to_atom37_list = []
restype_atom37_to_atom14_list = []
restype_atom14_mask_list = []
for rt in rc.restypes:
atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
restype_atom37_to_atom14_list.append(
[(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atom14_to_atom37_list.append([0] * 14 )
restype_atom37_to_atom14_list.append([0] * 37 )
restype_atom14_mask_list.append([0.0] * 14 )
restype_atom14_to_atom37 = torch.tensor(
restype_atom14_to_atom37_list ,dtype=torch.int32 ,device=protein['aatype'].device ,)
restype_atom37_to_atom14 = torch.tensor(
restype_atom37_to_atom14_list ,dtype=torch.int32 ,device=protein['aatype'].device ,)
restype_atom14_mask = torch.tensor(
restype_atom14_mask_list ,dtype=torch.float32 ,device=protein['aatype'].device ,)
protein_aatype = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
residx_atom14_mask = restype_atom14_mask[protein_aatype]
protein['atom14_atom_exists'] = residx_atom14_mask
protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long()
# create the gather indices for mapping back
residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long()
# create the corresponding mask
restype_atom37_mask = torch.zeros([21, 37] ,dtype=torch.float32 ,device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
restype_name = rc.restype_1to3[restype_letter]
atom_names = rc.residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = rc.atom_order[atom_name]
restype_atom37_mask[restype, atom_type] = 1
residx_atom37_mask = restype_atom37_mask[protein_aatype]
protein['atom37_atom_exists'] = residx_atom37_mask
return protein
def make_atom14_masks_np( batch : Dict ) -> Dict[str, np.ndarray]:
batch = tree_map(lambda n : torch.tensor(n ,device=batch['aatype'].device ) ,batch ,np.ndarray )
out = tensor_tree_map(lambda t : np.array(t ) ,make_atom14_masks(batch ) )
return out
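# Minimal sketch of the gather-index idea used above, on toy shapes (the numbers
# here are illustrative, not real residue constants):
#   lookup = torch.tensor([[0, 1], [2, 3]])   # one index row per residue type
#   aatype = torch.tensor([1, 0, 1])          # one residue-type id per residue
#   lookup[aatype]  ->  tensor([[2, 3], [0, 1], [2, 3]])   # per-residue index map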
| 368 |
'''simple docstring'''
def perfect_cube( n : int ) -> bool:
# Round the cube root before cubing: 27 ** (1 / 3) evaluates to
# 3.0000000000000004 under IEEE-754, so an exact comparison without rounding
# would wrongly report False for a true cube such as 27.
val = round(n ** (1 / 3) )
return (val * val * val) == n
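# An exact-integer alternative (a sketch, not part of the original module): binary
# search over the cube root avoids floating point entirely for very large n.
#   def perfect_cube_binary_search(n: int) -> bool:
#       lo, hi = 0, abs(n)
#       while lo <= hi:
#           mid = (lo + hi) // 2
#           if mid**3 == abs(n):
#               return True
#           lo, hi = (mid + 1, hi) if mid**3 < abs(n) else (lo, mid - 1)
#       return False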
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 0 | 0 |
'''simple docstring'''
import math
def real_power( apparent_power : float , power_factor : float ) -> float:
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def reactive_power( apparent_power : float , power_factor : float ) -> float:
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
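# Illustrative values for the two helpers above (worked examples, not part of the
# original module):
#   real_power(100, 0.9)     -> 90.0     (P = S * pf)
#   reactive_power(100, 0.9) -> ~43.589  (Q = S * sqrt(1 - pf**2))
# A purely resistive load (pf = 1.0) therefore has zero reactive power.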
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Tuple = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
def _create_dummy_dataset( self ) -> Dataset:
'''simple docstring'''
dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
dset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
mocked_index_create.return_value = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=es_client )
scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
def A_ ( self : str ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
query = np.zeros(5 , dtype=np.floataa )
query[1] = 1
scores , indices = index.search(query )
self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
queries = np.eye(5 , dtype=np.floataa )[::-1]
total_scores , total_indices = index.search_batch(queries )
self.assertRaises(ValueError , index.search_batch , queries[0] )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def A_ ( self : int ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
index = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(ValueError ):
index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
import faiss
custom_index = faiss.IndexFlat(5 )
index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
index = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
query = np.zeros(5 , dtype=np.floataa )
query[1] = 1
scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ) -> None:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
index_name = 'index.faiss'
path = f'''mock://{index_name}'''
index.save(path ,storage_options=mockfs.storage_options )
index = FaissIndex.load(path ,storage_options=mockfs.storage_options )
query = np.zeros(5 ,dtype=np.floataa )
query[1] = 1
scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
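# Note: `mockfs` above is assumed to be a pytest fixture defined elsewhere in the
# test suite; it exposes a `mock://` filesystem whose options are forwarded to
# `FaissIndex.save`/`FaissIndex.load` through `storage_options`.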
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {'acknowledged': True}
index = ElasticSearchIndex(es_client=es_client )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
# batched queries with timeout
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
| 0 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
A__ : List[Any] = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
A__ : Optional[int] = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['''input_ids''', '''attention_mask''']
A__ = BartTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
'''simple docstring'''
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
pre_tok_state['add_prefix_space'] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
tokenizer_component = 'post_processor'
tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
state['sep'] = tuple(state['sep'] )
if "cls" in state:
state['cls'] = tuple(state['cls'] )
changes_to_apply = False
if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
state['add_prefix_space'] = add_prefix_space
changes_to_apply = True
if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
state['trim_offsets'] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors , state.pop('type' ) )
new_value = component_class(**state )
setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def mask_token( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. includes the space before it.
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
'''simple docstring'''
is_split_into_words = kwargs.get('is_split_into_words' , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
'''simple docstring'''
is_split_into_words = kwargs.get('is_split_into_words' , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
'''simple docstring'''
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return output
return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
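# Resulting layouts from the two helpers above (BART/RoBERTa convention; ids are
# placeholders): single sequence -> <s> A </s>, pair -> <s> A </s> </s> B </s>,
# and the token type ids are all zeros because BART does not use segment ids.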
| 370 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : Tuple = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config( PretrainedConfig ):
A__ = '''t5'''
A__ = ['''past_key_values''']
A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> None:
'''simple docstring'''
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split('-' )
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == 'gated'
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = 'gelu_new'
super().__init__(
pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
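# Illustrative parsing of `feed_forward_proj` by the code above (assumed inputs):
#   'relu'        -> dense_act_fn='relu',     is_gated_act=False
#   'gated-silu'  -> dense_act_fn='silu',     is_gated_act=True
#   'gated-gelu'  -> dense_act_fn='gelu_new', is_gated_act=True (backwards-compat remap)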
class T5OnnxConfig( OnnxSeqaSeqConfigWithPast ):
@property
def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__snake_case : Union[str, Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__snake_case : Tuple = 'past_encoder_sequence + sequence'
__snake_case : Dict = {0: 'batch'}
__snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'}
__snake_case : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
return 13
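# With `use_past=True` the ONNX input spec above marks the decoder sequence axis
# as 'past_decoder_sequence + sequence' and `fill_with_past_key_values_` adds the
# per-layer cached key/value tensors for the 'inputs' direction, so exported
# models accept a growing decoding cache.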
| 0 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> None:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ) -> Tuple:
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ) -> MaskFormerSwinConfig:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def create_and_check_model( self , config , pixel_values , labels ) -> None:
'''simple docstring'''
model = MaskFormerSwinModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def create_and_check_backbone( self , config , pixel_values , labels ) -> None:
'''simple docstring'''
model = MaskFormerSwinBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(ValueError ):
config.out_features = ['stem']
model = MaskFormerSwinBackbone(config=config )
def prepare_config_and_inputs_for_common( self ) -> Tuple:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
A__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def setUp( self ) -> None:
'''simple docstring'''
self.model_tester = MaskFormerSwinModelTester(self )
self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def A_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def A_ ( self : Tuple ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
return
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip('Swin does not use inputs_embeds' )
def A_ ( self : Dict ) -> str:
'''simple docstring'''
pass
@unittest.skip('Swin does not support feedforward chunking' )
def A_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
pass
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def A_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
pass
def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> None:
'''simple docstring'''
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A_ ( self : int ) -> str:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def A_ ( self : Dict ) -> str:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def A_ ( self : str ) -> str:
'''simple docstring'''
pass
def A_ ( self : int ) -> str:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
with torch.no_grad():
tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
def recursive_check(tuple_object , dict_object ):
if isinstance(tuple_object , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif isinstance(tuple_object , Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'''
f''' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'''
) , )
recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
A__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
A__ = MaskFormerSwinConfig
def setUp( self ) -> None:
'''simple docstring'''
self.model_tester = MaskFormerSwinModelTester(self )
def test_backbone_outputs( self ) -> None:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = {}
class LlamaConfig( PretrainedConfig ):
A__ = '''llama'''
A__ = ['''past_key_values''']
def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> None:
'''simple docstring'''
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def _rope_scaling_validation( self ) -> None:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'''got {self.rope_scaling}''' )
rope_scaling_type = self.rope_scaling.get('type' , None )
rope_scaling_factor = self.rope_scaling.get('factor' , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
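# Illustrative outcomes of the validator above (worked examples, not from the
# original module):
#   rope_scaling={'type': 'linear', 'factor': 2.0}  -> accepted
#   rope_scaling={'type': 'xpos', 'factor': 2.0}    -> ValueError (unknown type)
#   rope_scaling={'type': 'linear', 'factor': 1.0}  -> ValueError (factor must be > 1)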
| 0 | 0 |
'''simple docstring'''
def solution( limit : int = 28123 ) -> int:
sum_divs = [1] * (limit + 1)
for i in range(2 ,int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 ,limit // i + 1 ):
sum_divs[k * i] += k + i
abundants = set()
res = 0
for n in range(1 ,limit + 1 ):
if sum_divs[n] > n:
abundants.add(n )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
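# Background for the 28123 default (Project Euler 23): 12 is the smallest abundant
# number, 24 is the smallest integer expressible as the sum of two abundant numbers,
# and every integer greater than 28123 can be written as such a sum, so the loop
# above only needs to test candidates up to that bound.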
if __name__ == "__main__":
print(solution())
| 350 |
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''[email protected]'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
def __init__( self , domain : str ) -> None:
'''simple docstring'''
super().__init__()
self.urls : list[str] = []
self.domain = domain
def handle_starttag( self , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
url = parse.urljoin(self.domain , value )
self.urls.append(url )
def get_domain_name( url : str ) -> str:
return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )
def get_sub_domain_name( url : str ) -> str:
return parse.urlparse(url ).netloc
def emails_from_url( url : str = "https://github.com" ) -> list[str]:
domain = get_domain_name(url )
# Initialize the parser
parser = Parser(domain )
try:
# Open URL
r = requests.get(url )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
valid_emails = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
read = requests.get(link )
# Get the valid email.
emails = re.findall('[a-zA-Z0-9]+@' + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(valid_emails )
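# Illustrative call (requires network access; the result varies over time):
#   emails_from_url('https://github.com') returns a sorted, de-duplicated list of
#   addresses matching '[a-zA-Z0-9]+@' + domain found on the fetched pages.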
if __name__ == "__main__":
A__ : Tuple = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 0 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
A__ : str = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments :
task_name : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
data_dir : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
max_seq_length : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
overwrite_cache : bool = field(
default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __post_init__( self ) -> None:
'''simple docstring'''
self.task_name = self.task_name.lower()
class Split( Enum ):
train = '''train'''
dev = '''dev'''
test = '''test'''
class GlueDataset( Dataset ):
args : GlueDataTrainingArguments
output_mode : str
features : List[InputFeatures]
def __init__( self , args : GlueDataTrainingArguments , tokenizer : PreTrainedTokenizerBase , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , cache_dir : Optional[str] = None , ) -> None:
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '''.lock'''
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir )
else:
examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self , i : int ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def get_labels( self ) -> List[str]:
'''simple docstring'''
return self.label_list
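# Illustrative wiring (a sketch; the argument values are assumptions, not from the
# original module):
#   args = GlueDataTrainingArguments(task_name='mrpc', data_dir='...', max_seq_length=128)
#   dataset = GlueDataset(args, tokenizer, mode='train')
#   len(dataset) -> number of cached features; dataset[0] -> an InputFeatures;
#   dataset.get_labels() -> the task's label list.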
| 351 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A__ : Dict = logging.getLogger()
def a_ ( ) -> str:
parser = argparse.ArgumentParser()
parser.add_argument('-f' )
args = parser.parse_args()
return args.f
def get_results( output_dir : str ) -> dict:
results = {}
path = os.path.join(output_dir ,'all_results.json' )
if os.path.exists(path ):
with open(path ,'r' ) as f:
results = json.load(f )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
def is_cuda_and_apex_available( ) -> bool:
is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
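# `get_results` reads the `all_results.json` file that every `*_no_trainer` example
# script writes at the end of training; the assertions in the tests below key into it.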
A__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
@classmethod
def setUpClass( cls ) -> None:
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
cls.tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def tearDownClass( cls ) -> None:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
epochs = 7 if get_gpu_count() > 1 else 2
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(__a )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) )
@slow
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
__snake_case : List[str] = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[int] = get_results(__a )
# The base model scores 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
| 0 | 0 |
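Note: every no_trainer test above funnels its assertions through a get_results helper. A minimal sketch of what such a helper typically does, assuming the example scripts dump their final metrics to an all_results.json in the output directory (as the official example scripts do):

import json
import os


def get_results(output_dir: str) -> dict:
    # Load the metrics JSON written by the example script so tests can
    # assert on keys such as "perplexity" or "eval_accuracy".
    results_path = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(results_path):
        raise ValueError(f"can't find {results_path}")
    with open(results_path, "r") as f:
        return json.load(f)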
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
# Initialise PyTorch model
__snake_case : Optional[Any] = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__snake_case : Optional[Any] = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
# Save the PyTorch model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,UpperCAmelCase_ )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 352 |
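Usage sketch for the conversion entry point above (the function the argparse block invokes in the de-obfuscated script); all three paths are placeholders:

# Hypothetical paths -- substitute a real TF checkpoint, config, and output file.
tf_checkpoint_path = "/path/to/lxmert/model.ckpt"
config_file = "/path/to/lxmert/config.json"
pytorch_dump_path = "/path/to/output/pytorch_model.bin"

convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path)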
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
__snake_case : Optional[Any] = [True] * n
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__snake_case : Optional[int] = i * 2
while index < n:
__snake_case : Union[str, Any] = False
__snake_case : int = index + i
__snake_case : Dict = [2]
for i in range(3 ,_UpperCAmelCase ,2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
__snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case : Tuple = prime_sieve(_UpperCAmelCase )
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : Optional[int] = primes[prime_index + 1]
__snake_case : Union[str, Any] = last_prime**2
__snake_case : Dict = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Dict = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 0 |
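For reference, the odd-only sieve used by the Project Euler solution above can be written more readably; this is an equivalent sketch of the same algorithm, not the dataset's obfuscated form:

import math


def prime_sieve(n: int) -> list:
    # Sieve of Eratosthenes; even numbers are skipped and 2 is added by hand.
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(3, math.isqrt(n) + 1, 2):
        for multiple in range(i * 2, n, i):
            is_prime[multiple] = False
    return [2] + [i for i in range(3, n, 2) if is_prime[i]]


assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]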
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : List[str] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
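A quick worked check of the tax helper above (underscored literals like 1_0_0 are plain ints; math.isclose guards the inexact floating-point case):

import math

assert 100 * (1 + 0.25) == 125.0
assert math.isclose(125.50 * (1 + 0.05), 131.775)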
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class snake_case__ :
A__ = XGLMConfig
A__ = {}
A__ = '''gelu'''
def __init__( self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any]=14 , __a : Optional[Any]=7 , __a : List[str]=True , __a : List[str]=True , __a : Tuple=True , __a : Union[str, Any]=99 , __a : Optional[int]=32 , __a : Tuple=2 , __a : Optional[Any]=4 , __a : List[str]=37 , __a : int="gelu" , __a : str=0.1 , __a : Any=0.1 , __a : List[str]=512 , __a : Optional[Any]=0.0_2 , ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = parent
__snake_case : int = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Tuple = use_input_mask
__snake_case : Union[str, Any] = use_labels
__snake_case : List[str] = vocab_size
__snake_case : List[str] = d_model
__snake_case : Any = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Tuple = ffn_dim
__snake_case : Any = activation_function
__snake_case : Union[str, Any] = activation_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : List[Any] = max_position_embeddings
__snake_case : Tuple = initializer_range
__snake_case : Tuple = None
__snake_case : List[str] = 0
__snake_case : Tuple = 2
__snake_case : Any = 1
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def A_ ( self : Dict ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__snake_case : List[Any] = None
if self.use_input_mask:
__snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : str = self.get_config()
__snake_case : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A_ ( self : Dict ) -> int:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase_ , )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Any = config_and_inputs
__snake_case : Optional[int] = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
A__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
A__ = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
A__ = False
A__ = False
A__ = False
def A_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
__snake_case : str = TFXGLMModelTester(self )
__snake_case : str = ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = TFXGLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Any , __a : Optional[Any]=True ) -> str:
'''simple docstring'''
__snake_case : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__snake_case : Dict = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__snake_case : Tuple = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__snake_case : str = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase_ )
@slow
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__snake_case : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__snake_case : Union[str, Any] = tokenizer('Today is a nice day and' , return_tensors='tf' )
__snake_case : List[str] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__snake_case : Union[str, Any] = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ , seed=[7, 0] )
__snake_case : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ )
__snake_case : Union[str, Any] = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__snake_case : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__snake_case : List[Any] = 'left'
# use different length sentences to test batching
__snake_case : List[str] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__snake_case : int = tokenizer(lowerCamelCase_ , return_tensors='tf' , padding=lowerCamelCase_ )
__snake_case : Tuple = inputs['input_ids']
__snake_case : Any = model.generate(input_ids=lowerCamelCase_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__snake_case : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__snake_case : Optional[Any] = model.generate(input_ids=lowerCamelCase_ , max_new_tokens=12 )
__snake_case : Dict = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__snake_case : int = model.generate(input_ids=lowerCamelCase_ , max_new_tokens=12 )
__snake_case : int = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
__snake_case : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ )
__snake_case : int = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ )
__snake_case : int = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] )
| 354 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : Union[str, Any] = 'tf'
def A_ ( self : Dict , __a : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : Any , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(__a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
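# TensorFlow not in environment -> use PyTorch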
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Optional[Any] = MagicMock(return_value=__a )
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : str = MagicMock(return_value=__a )
__snake_case : List[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
| 0 | 0 |
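Outside the test harness, the behaviour exercised above reduces to a single call; a minimal sketch (assuming transformers with its ONNX extras installed):

from transformers.onnx import FeaturesManager

# With no framework argument, falls back on whichever of PyTorch/TensorFlow
# is importable in the current environment.
framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" or "tf" depending on the environment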
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
A__ : List[Any] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
A__ : List[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
A__ : int = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def A_ ( self : Optional[int] , __a : Tuple , __a : Dict , __a : Any=False ) -> Dict:
'''simple docstring'''
if return_pvalue:
__snake_case : int = pearsonr(lowercase_ , lowercase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_ )[0] )}
| 355 |
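The metric above is a thin wrapper around scipy.stats.pearsonr; the first docstring example can be reproduced without datasets at all:

from scipy.stats import pearsonr

predictions = [10, 9, 2.5, 6, 4]
references = [1, 2, 3, 4, 5]

coefficient, p_value = pearsonr(references, predictions)
print(round(coefficient, 2))  # -0.74, as in the docstring example
print(round(p_value, 2))      # 0.15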
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ProphetNetTokenizer
A__ = False
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : int , __a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = 'UNwant\u00E9d,running'
__snake_case : List[str] = 'unwanted, running'
return input_text, output_text
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class(self.vocab_file )
__snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A_ ( self : int ) -> Any:
'''simple docstring'''
__snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__snake_case : List[Any] = {}
for i, token in enumerate(__a ):
__snake_case : List[str] = i
__snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' )
self.assertIsInstance(__a , __a )
__snake_case : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Any = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class snake_case__ ( A__ ):
A__ = 'gpt_neox_japanese'
def __init__( self : Dict , __a : List[str]=32000 , __a : List[str]=2560 , __a : Optional[Any]=32 , __a : int=32 , __a : List[str]=4 , __a : Union[str, Any]="gelu" , __a : Optional[Any]=1.0_0 , __a : str=10000 , __a : Dict=2048 , __a : str=0.0_2 , __a : Tuple=1e-5 , __a : Optional[int]=True , __a : Union[str, Any]=31996 , __a : Optional[Any]=31999 , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.0 , **__a : List[str] , ) -> str:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__snake_case : List[str] = vocab_size
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Dict = intermediate_multiple_size
__snake_case : Optional[Any] = hidden_act
__snake_case : Any = rotary_pct
__snake_case : int = rotary_emb_base
__snake_case : Optional[Any] = initializer_range
__snake_case : List[Any] = layer_norm_eps
__snake_case : Optional[int] = use_cache
__snake_case : Union[str, Any] = attention_dropout
__snake_case : List[Any] = hidden_dropout
| 356 |
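Instantiation sketch for the config above, using the public transformers class name; the printed values are simply the defaults from the __init__ signature:

from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig()  # all defaults: 32000-token vocab, 32 layers, 32 heads
print(config.model_type)   # "gpt_neox_japanese"
print(config.hidden_size)  # 2560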
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
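The try/except import guards above feed a lazy module, so heavy backends are only imported on first attribute access. A stripped-down sketch of that idea (not the actual transformers _LazyModule implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [attr, ...]} into {attr: submodule} for lookup.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Import the defining submodule only when the attribute is first used.
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, attr)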
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : str = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class snake_case__ ( a_ ):
A__ = '''dpr'''
def __init__( self : List[Any] , __a : List[str]=30522 , __a : Optional[Any]=768 , __a : int=12 , __a : Optional[Any]=12 , __a : List[str]=3072 , __a : Dict="gelu" , __a : Optional[Any]=0.1 , __a : Dict=0.1 , __a : int=512 , __a : Dict=2 , __a : Optional[Any]=0.0_2 , __a : List[Any]=1e-12 , __a : Dict=0 , __a : Optional[Any]="absolute" , __a : int = 0 , **__a : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
__snake_case : int = vocab_size
__snake_case : str = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Union[str, Any] = hidden_act
__snake_case : Dict = intermediate_size
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : Any = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Tuple = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Dict = projection_dim
__snake_case : Optional[int] = position_embedding_type
| 357 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ) -> list:
# bit_count represents the number of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be non-negative' )
# get the generated string sequence
__snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase )
# convert them to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case : Optional[Any] = int(sequence[i] ,2 )
return sequence
def a_ ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case : Dict = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case : Dict = gray_code_sequence_string(bit_count - 1 )
__snake_case : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case : str = '0' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case : Any = '1' + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
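The recursive string construction above agrees with the standard closed form g(i) = i XOR (i >> 1); a quick cross-check sketch:

def gray_code(bit_count: int) -> list:
    # The i-th Gray code is i ^ (i >> 1); 1 << bit_count codes in total.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]


assert gray_code(2) == [0, 1, 3, 2]
assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]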
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : List[Any] = 8
# DPR tok
__snake_case : Optional[int] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__snake_case : List[Any] = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__snake_case : Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) )
__snake_case : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case : Optional[int] = {'unk_token': '<unk>'}
__snake_case : Optional[int] = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__snake_case : int = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : str = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def A_ ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A_ ( self : List[str] ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A_ ( self : Tuple ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Any = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_dummy_dataset()
__snake_case : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__snake_case : Union[str, Any] = dataset
__snake_case : Union[str, Any] = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A_ ( self : Optional[int] , __a : bool ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_dummy_dataset()
__snake_case : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__snake_case : int = os.path.join(self.tmpdirname , 'dataset' )
__snake_case : Any = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__snake_case : List[str] = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__snake_case : Any = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Dict = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case : int = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__snake_case : str = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__A , open(__A , 'wb' ) )
__snake_case : Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__snake_case : str = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = 1
__snake_case : int = self.get_dummy_canonical_hf_index_retriever()
__snake_case : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case : List[Any] = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__snake_case : int = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__snake_case : Dict = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : str = 1
__snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__snake_case : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case : Any = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : Dict = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 1
__snake_case : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__snake_case : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case : Tuple = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
__snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : Tuple = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : int = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = 1
__snake_case : Union[str, Any] = self.get_dummy_legacy_index_retriever()
__snake_case : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case : List[Any] = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : List[Any] = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
import torch
__snake_case : Dict = 1
__snake_case : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
__snake_case : int = [[5, 7], [10, 11]]
__snake_case : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : Dict = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__snake_case , __snake_case , __snake_case : Any = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__snake_case : Union[str, Any] = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : int ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_dpr_ctx_encoder_tokenizer()
__snake_case : Any = 1
__snake_case : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__snake_case : List[str] = [[5, 7], [10, 11]]
__snake_case : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case : int = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
| 358 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
# fmt: off
__snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : List[str] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
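# A hedged usage sketch of the processor exercised above: any BERT-style
# tokenizer can be paired with a ViT image processor so that text and image
# batches come back in one dict. The two hub checkpoints are illustrative
# stand-ins, not something this test file prescribes.
def _processor_usage_sketch():
    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer.from_pretrained('bert-base-uncased'),
        image_processor=ViTImageProcessor.from_pretrained('google/vit-base-patch16-224'),
    )
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=['a photo of a cat'], images=image, return_tensors='np')
    # input_ids / token_type_ids / attention_mask come from the tokenizer,
    # pixel_values from the image processor.
    return sorted(batch.keys())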
| 0 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class snake_case__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>',
                 cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', src_lang=None,
                 tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang='en_XX', tgt_texts=None, tgt_lang='ro_RO', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
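# A hedged usage sketch for the tokenizer above. The checkpoint is the real
# facebook/mbart-large-en-ro repo already referenced in the maps; the sentence
# is illustrative. With src_lang='en_XX', set_src_lang_special_tokens makes the
# encoded ids end with </s> followed by the en_XX language-code token.
def _mbart_usage_sketch():
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained(
        'facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
    batch = tok('UN Chief Says There Is No Plan to Stop War', return_tensors='pt')
    return batch['input_ids']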
| 359 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx: int) -> list:
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final() -> list:
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetch the original CvT weights and convert them to the HuggingFace format."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint (.pth file).''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
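# The whole conversion reduces to applying a flat list of
# (huggingface_key, original_key) pairs to a checkpoint. A minimal, generic
# sketch of that renaming step, independent of the CvT-specific mappings above:
def rename_state_dict(original_weights, rename_pairs):
    renamed = OrderedDict()
    for hf_key, orig_key in rename_pairs:
        renamed[hf_key] = original_weights[orig_key]
    return renamed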
| 0 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding='max_length', return_tensors='np', **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)

            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning, )
        return self.image_processor
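# A hedged usage sketch for the processor above, using the real
# google/owlvit-base-patch32 checkpoint; the synthetic image and query texts
# are illustrative. Nested queries of unequal length are padded to the longest
# sublist before being stacked, exactly as implemented in __call__.
def _owlvit_usage_sketch():
    from PIL import Image

    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
    image = Image.fromarray(np.zeros((768, 768, 3), dtype=np.uint8))
    texts = [['a photo of a cat', 'a photo of a dog'], ['a photo of a car']]
    inputs = processor(text=texts, images=image, return_tensors='pt')
    # Four query rows in total: the second sample is padded with ' ' to two queries.
    return inputs['input_ids'].shape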
| 360 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` does not already appear in the row, column or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
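# Hedged sanity checks for the predicates above, run on a fresh copy of the
# solvable grid (the demo loop mutates the module-level grids in place): a 3
# already sits in row 0, so placing another 3 at (0, 1) is rejected, while 1
# is still legal there; (0, 1) is also the first empty cell the scanner finds.
def _sanity_check_sudoku_helpers() -> None:
    grid = [
        [3, 0, 6, 5, 0, 8, 4, 0, 0],
        [5, 2, 0, 0, 0, 0, 0, 0, 0],
        [0, 8, 7, 0, 0, 0, 0, 3, 1],
        [0, 0, 3, 0, 1, 0, 0, 8, 0],
        [9, 0, 0, 8, 6, 3, 0, 0, 5],
        [0, 5, 0, 0, 9, 0, 6, 0, 0],
        [1, 3, 0, 0, 0, 0, 2, 5, 0],
        [0, 0, 0, 0, 0, 0, 0, 7, 4],
        [0, 0, 5, 2, 0, 6, 3, 0, 0],
    ]
    assert not is_safe(grid, 0, 1, 3)
    assert is_safe(grid, 0, 1, 1)
    assert find_empty_location(grid) == (0, 1)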
| 0 | 0 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # rank is the order of the highest dual (epsilon) component tracked
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f'{self.real}+'
            f'{"+".join(str(dual)+"E"+str(n+1) for n, dual in enumerate(self.duals))}'
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))

        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the ``order``-th derivative of ``func`` at ``position`` via dual numbers."""
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
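# Worked check of the machinery above: f(y) = y**2 * y**4 = y**6, so
# f''(y) = 30 * y**4 and f''(9) = 30 * 6561 = 196830, which is exactly what
# differentiate(f, 9, 2) prints. The same fact as an assertion:
assert differentiate(lambda y: y**2 * y**4, 9, 2) == 196_830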
| 361 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4,
            num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=10.0, )

        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference, )
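# A hedged end-to-end sketch of what the dummy-weight test above exercises:
# the public kandinsky-community/kandinsky-2-2-prior checkpoint maps a prompt
# to CLIP image embeddings for the Kandinsky 2.2 decoder. Prompt and step
# count are illustrative, and running this downloads real weights.
def _prior_usage_sketch():
    pipe = KandinskyVaaPriorPipeline.from_pretrained('kandinsky-community/kandinsky-2-2-prior')
    out = pipe('a portrait of a horse', num_inference_steps=25, generator=torch.manual_seed(0))
    return out.image_embeds, out.negative_image_embeds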
| 0 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class snake_case__ ( PretrainedConfig ):
    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
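# A hedged construction sketch for the config above: with backbone_config left
# as None, the default branch builds a four-stage ResNet backbone config; a
# dict goes through the CONFIG_MAPPING branch instead (shown with ConvNeXt).
def _upernet_config_sketch():
    default_config = snake_case__()
    assert default_config.backbone_config.model_type == 'resnet'

    convnext_config = snake_case__(backbone_config={'model_type': 'convnext'})
    return convnext_config.to_dict()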
| 362 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')

    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
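# Worked checks for the helpers above: 145 maps to itself (1! + 4! + 5! = 145),
# and 169 -> 363601 -> 1454 -> 169 is the classic three-element loop from the
# Project Euler 74 statement.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363_601
assert digit_factorial_sum(363_601) == 1_454
assert digit_factorial_sum(1_454) == 169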
| 0 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F"""https://google.com{link.get("href")}""") | 363 |
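# A hedged hardening sketch for the URL built above: percent-encode the query
# terms instead of splicing raw argv text into the query string.
def build_search_url(terms: list[str]) -> str:
    from urllib.parse import quote_plus

    return "https://www.google.com/search?q=" + quote_plus(" ".join(terms))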
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
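# Worked check of the closed forms above for n = 10: the squares sum to 385,
# the sum 1 + ... + 10 = 55 squares to 3025, and the difference is 2640.
assert solution(10) == 2_640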
| 0 | 0 |